diff --git a/.fern/metadata.json b/.fern/metadata.json
new file mode 100644
index 00000000..eeb0a271
--- /dev/null
+++ b/.fern/metadata.json
@@ -0,0 +1,15 @@
+{
+ "cliVersion": "0.112.1",
+ "generatorName": "fernapi/fern-python-sdk",
+ "generatorVersion": "4.34.1",
+ "generatorConfig": {
+ "client": {
+ "class_name": "BaseClient",
+ "filename": "base_client.py",
+ "exported_class_name": "DeepgramClient",
+ "exported_filename": "client.py"
+ },
+ "use_typeddict_requests": true,
+ "should_generate_websocket_clients": true
+ }
+}
\ No newline at end of file
diff --git a/.fernignore b/.fernignore
index dfd18a20..e69de29b 100644
--- a/.fernignore
+++ b/.fernignore
@@ -1,33 +0,0 @@
-# Development, Configuration Files & Documentation
-README.md
-CONTRIBUTING.md
-.vscode/
-.gitignore
-mypy.ini
-websockets-reference.md
-.github/
-scripts/run_examples.sh
-docs/
-pyproject.toml
-CHANGELOG.md
-
-# Examples
-examples/
-
-# Test Files
-tests/unit/
-tests/integrations/
-
-# Custom Extensions & Clients
-src/deepgram/client.py
-src/deepgram/extensions/
-
-# Socket Client Implementations
-src/deepgram/agent/v1/socket_client.py
-src/deepgram/listen/v1/socket_client.py
-src/deepgram/listen/v2/socket_client.py
-src/deepgram/speak/v1/socket_client.py
-
-# Bug Fixes
-src/deepgram/listen/client.py
-src/deepgram/core/client_wrapper.py
\ No newline at end of file
diff --git a/.github/.commitlintrc.json b/.github/.commitlintrc.json
deleted file mode 100644
index 65411760..00000000
--- a/.github/.commitlintrc.json
+++ /dev/null
@@ -1,52 +0,0 @@
-{
- "extends": [
- "@commitlint/config-conventional"
- ],
- "rules": {
- "type-enum": [
- 2,
- "always",
- [
- "feat",
- "fix",
- "docs",
- "style",
- "refactor",
- "perf",
- "test",
- "build",
- "ci",
- "chore",
- "revert"
- ]
- ],
- "type-case": [
- 2,
- "always",
- "lower-case"
- ],
- "type-empty": [
- 2,
- "never"
- ],
- "scope-case": [
- 2,
- "always",
- "lower-case"
- ],
- "subject-empty": [
- 2,
- "never"
- ],
- "subject-full-stop": [
- 2,
- "never",
- "."
- ],
- "header-max-length": [
- 2,
- "always",
- 100
- ]
- }
-}
\ No newline at end of file
diff --git a/.github/.release-please-manifest.json b/.github/.release-please-manifest.json
deleted file mode 100644
index 41010749..00000000
--- a/.github/.release-please-manifest.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- ".": "5.3.0"
-}
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
deleted file mode 100644
index 67b5e5f4..00000000
--- a/.github/CODEOWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-# Global code owners - these users will be requested for review on all API SPEC PRs
-# DX TEAM Members
-* @lukeocodes
-
-# Future Reference: you can also specify owners for specific paths if needed:
-# /src/ @username1 @username2
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
deleted file mode 100644
index b9c29ebe..00000000
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ /dev/null
@@ -1,219 +0,0 @@
-name: "π Bug report"
-description: Report something that is broken or crashes
-title: "[Bug]: "
-labels: ["bug", "needs-triage"]
-assignees: []
-body:
- - type: markdown
- attributes:
- value: |
- Thanks for filing a bug. Please give a **minimal** repro when you can.
- - type: input
- id: summary
- attributes:
- label: Summary
- description: Short one-liner of the problem
- placeholder: "Crash when calling client.listen.v2 from a thread"
- validations:
- required: true
-
- - type: textarea
- id: what_happened
- attributes:
- label: What happened?
- description: Tell us what you saw and what you expected instead
- placeholder: |
- Actual:
- - β¦
-
- Expected:
- - β¦
- validations:
- required: true
-
- - type: textarea
- id: repro_steps
- attributes:
- label: Steps to reproduce
- description: Numbered steps; include inputs that matter (model, options, etc.)
- placeholder: |
- 1. Install deepgram-sdk==5.0.0
- 2. Run the code below
- 3. Observe error XYZ
- validations:
- required: true
-
- - type: textarea
- id: code
- attributes:
- label: Minimal code sample
- description: Small, runnable example (trim secrets). Attach a gist/repo if easier.
- render: python
- placeholder: |
- from deepgram import Deepgram
- # minimal snippet here
- validations:
- required: true
-
- - type: textarea
- id: logs
- attributes:
- label: Logs / traceback
- description: Full stack trace or error message (best with DEBUG logs)
- render: text
- placeholder: |
- Traceback (most recent call last):
- ...
- validations:
- required: false
-
- - type: dropdown
- id: transport
- attributes:
- label: Transport
- options:
- - HTTP
- - WebSocket
- - Both / Not sure
- validations:
- required: true
-
- - type: input
- id: endpoint
- attributes:
- label: API endpoint / path
- placeholder: "/v1/listen/β¦ or /v1/speak/β¦"
- validations:
- required: true
-
- - type: input
- id: model
- attributes:
- label: Model(s) used
- placeholder: "nova-2, aura-asteria-en, etc."
- validations:
- required: false
-
- - type: dropdown
- id: repro_rate
- attributes:
- label: How often?
- options:
- - Always
- - Often
- - Sometimes
- - Rarely
- - Only once
- validations:
- required: true
-
- - type: checkboxes
- id: regression
- attributes:
- label: Is this a regression?
- options:
- - label: "Yes, it worked in an earlier version"
- required: false
-
- - type: input
- id: worked_version
- attributes:
- label: Last working SDK version (if known)
- placeholder: "4.8.1"
- validations:
- required: false
-
- - type: input
- id: sdk_version
- attributes:
- label: SDK version
- placeholder: "5.0.0"
- validations:
- required: true
-
- - type: input
- id: python_version
- attributes:
- label: Python version
- placeholder: "3.10.14"
- validations:
- required: true
-
- - type: dropdown
- id: install_method
- attributes:
- label: Install method
- options:
- - pip
- - pipx
- - Poetry
- - uv
- - Conda
- - From source
- validations:
- required: false
-
- - type: dropdown
- id: os
- attributes:
- label: OS
- multiple: true
- options:
- - macOS (Intel)
- - macOS (Apple Silicon)
- - Linux (x86_64)
- - Linux (arm64)
- - Windows
- - Other
- validations:
- required: true
-
- - type: textarea
- id: extra_env
- attributes:
- label: Environment details
- description: Anything else? Docker, Fly.io, proxies, corporate network, etc.
- render: text
- validations:
- required: false
-
- - type: input
- id: repro_repo
- attributes:
- label: Link to minimal repro (optional)
- placeholder: "https://github.com/yourname/repro"
- validations:
- required: false
-
- - type: input
- id: session_id
- attributes:
- label: Session ID (optional)
- placeholder: "123e4567-e89b-12d3-a456-426614174000"
- validations:
- required: false
-
- - type: input
- id: project_id
- attributes:
- label: Project ID (optional)
- placeholder: "proj_abc123"
- validations:
- required: false
-
- - type: input
- id: request_id
- attributes:
- label: Request ID (optional)
- description: From API error messages or response headers (`x-dg-request-id`)
- placeholder: "req_def456"
- validations:
- required: false
-
- - type: checkboxes
- id: conduct
- attributes:
- label: Code of Conduct
- options:
- - label: I agree to follow this projectβs Code of Conduct
- required: true
diff --git a/.github/ISSUE_TEMPLATE/docs_improvement.yml b/.github/ISSUE_TEMPLATE/docs_improvement.yml
deleted file mode 100644
index 9a9125b3..00000000
--- a/.github/ISSUE_TEMPLATE/docs_improvement.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-name: "π Docs improvement"
-description: Fix or improve documentation, examples, or comments
-title: "[Docs]: "
-labels: ["documentation", "needs-triage"]
-body:
- - type: input
- id: page
- attributes:
- label: Affected page or section
- placeholder: "https://docs.example.com/python/β¦"
- validations:
- required: true
-
- - type: textarea
- id: issue
- attributes:
- label: What is unclear or wrong?
- placeholder: "Option X is outdated; code sample fails with 5.0.0"
- validations:
- required: true
-
- - type: textarea
- id: suggestion
- attributes:
- label: Suggested change
- render: markdown
- placeholder: "Replace snippet withβ¦ Add note about Python 3.12β¦"
- validations:
- required: false
-
- - type: textarea
- id: example
- attributes:
- label: Example code (if any)
- render: python
- placeholder: "# short snippet"
- validations:
- required: false
-
- - type: checkboxes
- id: parity
- attributes:
- label: SDK parity (if relevant)
- options:
- - label: This change may need updates in other SDKs
- required: false
-
- - type: input
- id: session_id
- attributes:
- label: Session ID (optional)
- placeholder: "123e4567-e89b-12d3-a456-426614174000"
- validations:
- required: false
-
- - type: input
- id: project_id
- attributes:
- label: Project ID (optional)
- placeholder: "proj_abc123"
- validations:
- required: false
-
- - type: input
- id: request_id
- attributes:
- label: Request ID (optional)
- description: From API error messages or response headers (`dg-request-id`)
- placeholder: "req_def456"
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
deleted file mode 100644
index cc3dd43b..00000000
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ /dev/null
@@ -1,100 +0,0 @@
-name: "β¨ Feature request"
-description: Suggest a new capability or API
-title: "[Feature]: "
-labels: ["enhancement", "needs-triage"]
-body:
- - type: input
- id: summary
- attributes:
- label: Summary
- placeholder: "Add async streaming helper for /v1/listen"
- validations:
- required: true
-
- - type: textarea
- id: problem
- attributes:
- label: Problem to solve
- description: What user problem does this address?
- placeholder: "Today I need to write a lot of boilerplate to stream audioβ¦"
- validations:
- required: true
-
- - type: textarea
- id: proposal
- attributes:
- label: Proposed solution
- description: API shape, flags, defaults. Keep it simple.
- render: python
- placeholder: |
- # Example
- async with client.listen.stream(model="nova-2") as s:
- await s.send_file("file.wav")
- async for msg in s:
- ...
- validations:
- required: true
-
- - type: textarea
- id: alternatives
- attributes:
- label: Alternatives considered
- placeholder: "Manual websockets; third-party lib; do nothing"
- validations:
- required: false
-
- - type: dropdown
- id: scope
- attributes:
- label: Scope
- options:
- - Python only
- - All SDKs (parity)
- - Docs/sample only
- validations:
- required: true
-
- - type: dropdown
- id: priority
- attributes:
- label: Priority
- options:
- - Nice to have
- - Important
- - High impact
- - Blocker
- validations:
- required: false
-
- - type: textarea
- id: context
- attributes:
- label: Extra context / links
- placeholder: "Related issues, forum threads, benchmarks, etc."
- validations:
- required: false
-
- - type: input
- id: session_id
- attributes:
- label: Session ID (optional)
- placeholder: "123e4567-e89b-12d3-a456-426614174000"
- validations:
- required: false
-
- - type: input
- id: project_id
- attributes:
- label: Project ID (optional)
- placeholder: "proj_abc123"
- validations:
- required: false
-
- - type: input
- id: request_id
- attributes:
- label: Request ID (optional)
- description: From API error messages or response headers (`dg-request-id`)
- placeholder: "req_def456"
- validations:
- required: false
diff --git a/.github/release-please-config.json b/.github/release-please-config.json
deleted file mode 100644
index 72e5b2ed..00000000
--- a/.github/release-please-config.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
- "packages": {
- ".": {
- "release-type": "python",
- "package-name": "deepgram-sdk",
- "tag-separator": "",
- "include-component-in-tag": false,
- "include-v-in-tag": true,
- "changelog-path": "CHANGELOG.md",
- "bump-minor-pre-major": false,
- "bump-patch-for-minor-pre-major": false,
- "draft": false,
- "extra-files": [
- {
- "type": "toml",
- "path": "pyproject.toml",
- "jsonpath": "$.tool.poetry.version"
- },
- {
- "type": "generic",
- "path": "src/deepgram/core/client_wrapper.py"
- }
- ]
- }
- },
- "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json"
-}
\ No newline at end of file
diff --git a/.github/workflows/changelog-log.yml b/.github/workflows/changelog-log.yml
deleted file mode 100644
index 61267df1..00000000
--- a/.github/workflows/changelog-log.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Changelog Log
-
-on:
- push:
- branches: [main]
- paths:
- - "**/CHANGELOG*.md"
- - "**/changelog*.md"
-
-jobs:
- notify:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Send changelog to webhook
- uses: lukeocodes/changelog-log@changelog-log-v0.1.7
- with:
- webhook_url: ${{ secrets.CHANGELOG_WEBHOOK_URL }}
- webhook_headers_json: '{"Content-Type":"application/json","X-DX-Logs-Key":"${{ secrets.CHANGELOG_SECRET }}"}'
- extra_body_json: '{"route":"changelog-python"}'
- # Project context is automatically inferred from GitHub context
- # You can override with:
- # project_name: "my-custom-project-name"
- # project_owner: "my-org"
- # repository_url: "https://github.com/owner/repo"
- # include_github_context: "true" # includes ref, workflow, actor info
- #
- # Other optional overrides:
- # file_globs: "CHANGELOG.md,**/CHANGELOG*.md"
- # entry_separator_regex: "^##\\s+.*$"
- # http_method: "POST"
- # include_body_raw: "false"
- # log_level: "warn" # trace, debug, info, warn, error, fatal
- # extra_body_json: '{"custom":"field"}' # merge custom fields into payload
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d160cad7..04557083 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,40 +1,28 @@
-name: CI
-
+name: ci
on: [push]
-
jobs:
- compile:
runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Set up python
- uses: actions/setup-python@v6
+ uses: actions/setup-python@v4
with:
- python-version: ${{ matrix.python-version }}
- - name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- name: Install dependencies
run: poetry install
- name: Compile
- run: poetry run mypy src/
+ run: poetry run mypy .
test:
runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
- needs: compile
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Set up python
- uses: actions/setup-python@v6
+ uses: actions/setup-python@v4
with:
- python-version: ${{ matrix.python-version }}
+ python-version: 3.8
- name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -43,3 +31,27 @@ jobs:
- name: Test
run: poetry run pytest -rP .
+
+ publish:
+ needs: [compile, test]
+ if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+ - name: Set up python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ - name: Bootstrap poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
+ - name: Install dependencies
+ run: poetry install
+ - name: Publish to pypi
+ run: |
+ poetry config repositories.remote https://upload.pypi.org/legacy/
+ poetry --no-interaction -v publish --build --repository remote --username "$PYPI_USERNAME" --password "$PYPI_PASSWORD"
+ env:
+ PYPI_USERNAME: ${{ secrets.PYPI_USERNAME }}
+ PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
diff --git a/.github/workflows/pr-title-check.yml b/.github/workflows/pr-title-check.yml
deleted file mode 100644
index 65163b77..00000000
--- a/.github/workflows/pr-title-check.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: Title Check
-
-on:
- pull_request:
- types:
- - opened
- - edited
- - synchronize
- - reopened
-
-jobs:
- title-check:
- name: Title Check
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo
- uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: "20"
-
- - name: Install commitlint
- run: |
- npm install --save-dev @commitlint/config-conventional @commitlint/cli
-
- - name: Validate PR title
- env:
- PR_TITLE: ${{ github.event.pull_request.title }}
- run: |
- echo "$PR_TITLE" | npx commitlint -g .github/.commitlintrc.json
diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
deleted file mode 100644
index 45521a8b..00000000
--- a/.github/workflows/release-please.yml
+++ /dev/null
@@ -1,89 +0,0 @@
-name: Release Please
-
-on:
- push:
- branches:
- - main # stable releases
-
-permissions:
- contents: write
- issues: write
- pull-requests: write
-
-jobs:
- compile:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
- steps:
- - name: Checkout repo
- uses: actions/checkout@v4
- - name: Set up python
- uses: actions/setup-python@v6
- with:
- python-version: ${{ matrix.python-version }}
- - name: Bootstrap poetry
- run: |
- curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- - name: Install dependencies
- run: poetry install
- - name: Compile
- run: poetry run mypy src/
- test:
- runs-on: ubuntu-latest
- needs: compile
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
- steps:
- - name: Checkout repo
- uses: actions/checkout@v4
- - name: Set up python
- uses: actions/setup-python@v6
- with:
- python-version: ${{ matrix.python-version }}
- - name: Bootstrap poetry
- run: |
- curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- - name: Install dependencies
- run: poetry install
- - name: Test
- run: poetry run pytest -rP .
- release-please:
- runs-on: ubuntu-latest
- needs: test
- outputs:
- release_created: ${{ steps.release.outputs.release_created }}
- steps:
- - uses: googleapis/release-please-action@v4
- id: release
- with:
- config-file: .github/release-please-config.json
- manifest-file: .github/.release-please-manifest.json
- target-branch: ${{ github.ref_name }}
- publish:
- runs-on: ubuntu-latest
- needs: release-please
- if: ${{ needs.release-please.outputs.release_created }}
- strategy:
- matrix:
- python-version: ["3.8"]
- steps:
- - name: Checkout repo
- uses: actions/checkout@v4
- - name: Set up python
- uses: actions/setup-python@v6
- with:
- python-version: ${{ matrix.python-version }}
- - name: Bootstrap poetry
- run: |
- curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- - name: Install dependencies
- run: poetry install
- - name: Build package
- run: poetry build
- - name: Publish to PyPI
- env:
- POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
- run: poetry publish
diff --git a/.github/workflows/tests-daily.yml b/.github/workflows/tests-daily.yml
deleted file mode 100644
index a6acee9b..00000000
--- a/.github/workflows/tests-daily.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: Daily Tests
-
-on:
- workflow_dispatch:
- schedule:
- - cron: "0 9 * * *"
-
-jobs:
- compile:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
- steps:
- - name: Checkout repo
- uses: actions/checkout@v4
- - name: Set up python
- uses: actions/setup-python@v6
- with:
- python-version: ${{ matrix.python-version }}
- - name: Bootstrap poetry
- run: |
- curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- - name: Install dependencies
- run: poetry install
- - name: Compile
- run: poetry run mypy src/
- test:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
- needs: compile
- steps:
- - name: Checkout repo
- uses: actions/checkout@v4
- - name: Set up python
- uses: actions/setup-python@v6
- with:
- python-version: ${{ matrix.python-version }}
- - name: Bootstrap poetry
- run: |
- curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- - name: Install dependencies
- run: poetry install
-
- - name: Test
- run: poetry run pytest -rP .
diff --git a/.gitignore b/.gitignore
index 96eb8bdb..d2e4ca80 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,17 +3,3 @@
__pycache__/
dist/
poetry.toml
-.env
-.pytest_cache/
-
-# ignore example output files
-examples/**/output.*
-
-# ignore venv
-venv/
-.DS_Store
-
-# ignore build artifacts and dependencies
-Pipfile
-Pipfile.lock
-deepgram_sdk.egg-info/
diff --git a/CHANGELOG.md b/CHANGELOG.md
deleted file mode 100644
index 9c135f4b..00000000
--- a/CHANGELOG.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Changelog
-
-## [5.3.0](https://github.com/deepgram/deepgram-python-sdk/compare/v5.2.0...v5.3.0) (2025-10-30)
-
-
-### Features
-
-* add projects billing fields list methods ([#621](https://github.com/deepgram/deepgram-python-sdk/issues/621)) ([10d67cd](https://github.com/deepgram/deepgram-python-sdk/commit/10d67cd91aef1436a9e85e3b607dc7b81eebba43))
-
-## [5.2.0](https://github.com/deepgram/deepgram-python-sdk/compare/v5.1.0...v5.2.0) (2025-10-21)
-
-
-### Features
-
-* SDK regeneration (21 Oct 2025) ([#609](https://github.com/deepgram/deepgram-python-sdk/issues/609)) ([5b21460](https://github.com/deepgram/deepgram-python-sdk/commit/5b2146058842fe4dc6d6ef4bd9c0777b08f48fab))
-
-## [5.1.0](https://github.com/deepgram/deepgram-python-sdk/compare/v5.0.0...v5.1.0) (2025-10-16)
-
-
-### Features
-
-* mention keep alive in migration guide ([#594](https://github.com/deepgram/deepgram-python-sdk/issues/594)) ([5a8c79e](https://github.com/deepgram/deepgram-python-sdk/commit/5a8c79e814e3efeb81a8c51a0a05d93bc17e6bb5))
-* update the SDK with upstream spec changes ([d77ad96](https://github.com/deepgram/deepgram-python-sdk/commit/d77ad966db62e068fb6e346d247299bc9efd1bd5))
-
-
-### Bug Fixes
-
-* **ci:** reference the correct secret ([#585](https://github.com/deepgram/deepgram-python-sdk/issues/585)) ([09550c7](https://github.com/deepgram/deepgram-python-sdk/commit/09550c7c43b6778d52030bd70a48905c425d1365))
-* corrects order to the release workflow ([#583](https://github.com/deepgram/deepgram-python-sdk/issues/583)) ([3abbac3](https://github.com/deepgram/deepgram-python-sdk/commit/3abbac3271e77e718dde19580a16cdf915c263df))
-* remove testpypi we don't need it in the workflow ([#582](https://github.com/deepgram/deepgram-python-sdk/issues/582)) ([b2e2538](https://github.com/deepgram/deepgram-python-sdk/commit/b2e2538cb9528f48e9a20a839763ff82fe40ab8b))
-* support multiple keyterms for v2 listen client ([#595](https://github.com/deepgram/deepgram-python-sdk/issues/595)) ([7a9d41d](https://github.com/deepgram/deepgram-python-sdk/commit/7a9d41d2b5a48dd094ca20e7f5a227afbdd46dc0))
-
-## [5.0.0](https://github.com/deepgram/deepgram-python-sdk/compare/v4.8.1...v5.0.0) (2025-10-02)
-
-
-### β BREAKING CHANGES
-
-* This is a significant breaking change, and should be carried out in conjunction with our migration guide.
-
-### Features
-
-* implements new generated SDK architecture, all call signatures ([#572](https://github.com/deepgram/deepgram-python-sdk/issues/572)) ([768d514](https://github.com/deepgram/deepgram-python-sdk/commit/768d51492bf7414067266cdc2cf7b98f1f3981dc))
-
-
-### Bug Fixes
-
-* release-please config fixes ([#579](https://github.com/deepgram/deepgram-python-sdk/issues/579)) ([a603806](https://github.com/deepgram/deepgram-python-sdk/commit/a6038067596f1643cd5c7255f0e5a7ede1ff43fb))
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index a16a1b18..00000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Contributing
-
-Contributions are welcome. This is a generated library, and changes to core files should be promoted to our generator code.
-
-Requires Python 3.8+
-
-## Fork Repository
-
-Fork this repo on GitHub.
-
-## Clone Repository
-
-```bash
-git clone https://github.com/YOUR_USERNAME/deepgram-python-sdk.git
-cd deepgram-python-sdk
-```
-
-## Install Poetry
-
-```bash
-curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
-```
-
-Ensure Poetry is in your `$PATH`.
-
-## Install Dependencies
-
-```bash
-poetry install
-```
-
-## Run Tests
-
-```bash
-poetry run pytest -rP .
-```
-
-## Install Example Dependencies
-
-```bash
-poetry run pip install -r examples/requirements.txt
-```
-
-## Run Example
-
-```bash
-poetry run python -u examples/listen/media/transcribe_url/main.py
-```
-
-## Commit Changes
-
-```bash
-git add .
-git commit -m "feat: your change description"
-```
-
-## Push to Fork
-
-```bash
-git push origin main
-```
-
-## Create Pull Request
-
-Open a pull request from your fork to the main repository.
diff --git a/README.md b/README.md
index 35196939..570243d0 100644
--- a/README.md
+++ b/README.md
@@ -1,179 +1,59 @@
-# Deepgram Python SDK
-
-
-[](https://pypi.python.org/pypi/deepgram-sdk)
-[](https://www.python.org/downloads/)
-[](./LICENSE)
-
-The official Python SDK for Deepgram's automated speech recognition, text-to-speech, and language understanding APIs. Power your applications with world-class speech and Language AI models.
+# Deepgram API Python Library
+
+
+
+[](https://buildwithfern.com?utm_source=github&utm_medium=github&utm_campaign=readme&utm_source=https%3A%2F%2Fgithub.com%2Fdeepgram%2Fdeepgram-python-sdk)
+[](https://pypi.python.org/pypi/deepgram-sdk)
+
+Power your apps with world-class speech and Language AI models
+
+## Table of Contents
+
+- [Documentation](#documentation)
+- [Installation](#installation)
+- [Reference](#reference)
+- [Usage](#usage)
+- [Authentication](#authentication)
+- [Async Client](#async-client)
+- [Exception Handling](#exception-handling)
+- [Advanced Features](#advanced-features)
+- [Websockets](#websockets)
+- [Advanced](#advanced)
+ - [Access Raw Response Data](#access-raw-response-data)
+ - [Retries](#retries)
+ - [Timeouts](#timeouts)
+ - [Custom Client](#custom-client)
+- [Contributing](#contributing)
+- [Community Code of Conduct](#community-code-of-conduct)
+- [License](#license)
## Documentation
-Comprehensive API documentation and guides are available at [developers.deepgram.com](https://developers.deepgram.com).
-
-### Migrating From Earlier Versions
-
-- [v2 to v3+](./docs/Migrating-v2-to-v3.md)
-- [v3+ to v5](./docs/Migrating-v3-to-v5.md) (current)
+API reference documentation is available [here](https://developers.deepgram.com/reference/deepgram-api-overview).
## Installation
-Install the Deepgram Python SDK using pip:
-
-```bash
+```sh
pip install deepgram-sdk
```
## Reference
-- **[API Reference](./reference.md)** - Complete reference for all SDK methods and parameters
-- **[WebSocket Reference](./websockets-reference.md)** - Detailed documentation for real-time WebSocket connections
+A full reference for this library is available [here](https://github.com/deepgram/deepgram-python-sdk/blob/HEAD/reference.md).
## Usage
-### Quick Start
-
-The Deepgram SDK provides both synchronous and asynchronous clients for all major use cases:
-
-#### Real-time Speech Recognition (Listen v2)
-
-Our newest and most advanced speech recognition model with contextual turn detection ([WebSocket Reference](./websockets-reference.md#listen-v2-connect)):
-
-```python
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-
-client = DeepgramClient()
-
-with client.listen.v2.connect(
- model="flux-general-en",
- encoding="linear16",
- sample_rate="16000"
-) as connection:
- def on_message(message):
- print(f"Received {message.type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Error: {error}"))
-
- # Start listening and send audio data
- connection.start_listening()
-```
-
-#### File Transcription
-
-Transcribe pre-recorded audio files ([API Reference](./reference.md#listen-v1-media-transcribe-file)):
-
-```python
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-with open("audio.wav", "rb") as audio_file:
- response = client.listen.v1.media.transcribe_file(
- request=audio_file.read(),
- model="nova-3"
- )
- print(response.results.channels[0].alternatives[0].transcript)
-```
-
-#### Text-to-Speech
-
-Generate natural-sounding speech from text ([API Reference](./reference.md#speak-v1-audio-generate)):
-
-```python
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-response = client.speak.v1.audio.generate(
- text="Hello, this is a sample text to speech conversion."
-)
-
-# Save the audio file
-with open("output.mp3", "wb") as audio_file:
- audio_file.write(response.stream.getvalue())
-```
-
-#### Text Analysis
-
-Analyze text for sentiment, topics, and intents ([API Reference](./reference.md#read-v1-text-analyze)):
+Instantiate and use the client with the following:
```python
from deepgram import DeepgramClient
-client = DeepgramClient()
-
-response = client.read.v1.text.analyze(
- request={"text": "Hello, world!"},
- language="en",
- sentiment=True,
- summarize=True,
- topics=True,
- intents=True
-)
-```
-
-#### Voice Agent (Conversational AI)
-
-Build interactive voice agents ([WebSocket Reference](./websockets-reference.md#agent-v1-connect)):
-
-```python
-from deepgram import DeepgramClient
-from deepgram.extensions.types.sockets import (
- AgentV1SettingsMessage, AgentV1Agent, AgentV1AudioConfig,
- AgentV1AudioInput, AgentV1Listen, AgentV1ListenProvider,
- AgentV1Think, AgentV1OpenAiThinkProvider, AgentV1SpeakProviderConfig,
- AgentV1DeepgramSpeakProvider
+client = DeepgramClient(
+ api_key="YOUR_API_KEY",
)
-
-client = DeepgramClient()
-
-with client.agent.v1.connect() as agent:
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(encoding="linear16", sample_rate=44100)
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(type="deepgram", model="nova-3")
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai", model="gpt-4o-mini"
- )
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram", model="aura-2-asteria-en"
- )
- )
- )
- )
-
- agent.send_settings(settings)
- agent.start_listening()
+client.listen.v1.media.transcribe_file()
```
-### Complete SDK Reference
-
-For comprehensive documentation of all available methods, parameters, and options:
-
-- **[API Reference](./reference.md)** - Complete reference for REST API methods including:
-
- - Listen (Speech-to-Text): File transcription, URL transcription, and media processing
- - Speak (Text-to-Speech): Audio generation and voice synthesis
- - Read (Text Intelligence): Text analysis, sentiment, summarization, and topic detection
- - Manage: Project management, API keys, and usage analytics
- - Auth: Token generation and authentication management
-
-- **[WebSocket Reference](./websockets-reference.md)** - Detailed documentation for real-time connections:
- - Listen v1/v2: Real-time speech recognition with different model capabilities
- - Speak v1: Real-time text-to-speech streaming
- - Agent v1: Conversational voice agents with integrated STT, LLM, and TTS
-
## Authentication
The Deepgram SDK supports two authentication methods:
@@ -222,58 +102,38 @@ The SDK automatically discovers credentials from these environment variables:
## Async Client
-The SDK provides full async/await support for non-blocking operations:
+The SDK also exports an `async` client so that you can make non-blocking calls to our API. Note that if you are constructing an Async httpx client class to pass into this client, use `httpx.AsyncClient()` instead of `httpx.Client()` (e.g. for the `httpx_client` parameter of this client).
```python
import asyncio
+
from deepgram import AsyncDeepgramClient
-async def main():
- client = AsyncDeepgramClient()
+client = AsyncDeepgramClient(
+ api_key="YOUR_API_KEY",
+)
- # Async file transcription
- with open("audio.wav", "rb") as audio_file:
- response = await client.listen.v1.media.transcribe_file(
- request=audio_file.read(),
- model="nova-3"
- )
- # Async WebSocket connection
- async with client.listen.v2.connect(
- model="flux-general-en",
- encoding="linear16",
- sample_rate="16000"
- ) as connection:
- async def on_message(message):
- print(f"Received {message.type} event")
+async def main() -> None:
+ await client.listen.v1.media.transcribe_file()
- connection.on(EventType.MESSAGE, on_message)
- await connection.start_listening()
asyncio.run(main())
```
## Exception Handling
-The SDK provides detailed error information for debugging and error handling:
+When the API returns a non-success status code (4xx or 5xx response), a subclass of the following error
+will be thrown.
```python
-from deepgram import DeepgramClient
from deepgram.core.api_error import ApiError
-client = DeepgramClient()
-
try:
- response = client.listen.v1.media.transcribe_file(
- request=audio_data,
- model="nova-3"
- )
+ client.listen.v1.media.transcribe_file(...)
except ApiError as e:
- print(f"Status Code: {e.status_code}")
- print(f"Error Details: {e.body}")
- print(f"Request ID: {e.headers.get('x-dg-request-id', 'N/A')}")
-except Exception as e:
- print(f"Unexpected error: {e}")
+ print(e.status_code)
+ print(e.body)
```
## Advanced Features
@@ -346,50 +206,147 @@ response = client.listen.v1.media.transcribe_file(
)
```
-## Contributing
+## Websockets
+
+The SDK supports both sync and async websocket connections for real-time, low-latency communication. Sockets can be created using the `connect` method, which returns a context manager.
+You can either iterate through the returned `SocketClient` to process messages as they arrive, or attach handlers to respond to specific events.
+
+```python
+
+# Connect to the websocket (Sync)
+import threading
+
+from deepgram import DeepgramClient
+
+client = DeepgramClient(...)
+
+with client.v1.connect() as socket:
+ # Iterate over the messages as they arrive
+    for message in socket:
+ print(message)
+
+ # Or, attach handlers to specific events
+ socket.on(EventType.OPEN, lambda _: print("open"))
+ socket.on(EventType.MESSAGE, lambda message: print("received message", message))
+ socket.on(EventType.CLOSE, lambda _: print("close"))
+ socket.on(EventType.ERROR, lambda error: print("error", error))
+
-We welcome contributions to improve this SDK! However, please note that this library is primarily generated from our API specifications.
+ # Start the listening loop in a background thread
+ listener_thread = threading.Thread(target=socket.start_listening, daemon=True)
+ listener_thread.start()
+```
-### Development Setup
+```python
-1. **Install Poetry** (if not already installed):
+# Connect to the websocket (Async)
+import asyncio
- ```bash
- curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
- ```
+from deepgram import AsyncDeepgramClient
-2. **Install dependencies**:
+client = AsyncDeepgramClient(...)
- ```bash
- poetry install
- ```
+async with client.v1.connect() as socket:
+ # Iterate over the messages as they arrive
+    async for message in socket:
+ print(message)
-3. **Install example dependencies**:
+ # Or, attach handlers to specific events
+ socket.on(EventType.OPEN, lambda _: print("open"))
+ socket.on(EventType.MESSAGE, lambda message: print("received message", message))
+ socket.on(EventType.CLOSE, lambda _: print("close"))
+ socket.on(EventType.ERROR, lambda error: print("error", error))
- ```bash
- poetry run pip install -r examples/requirements.txt
- ```
-4. **Run tests**:
+ # Start listening for events in an asyncio task
+ listen_task = asyncio.create_task(socket.start_listening())
+```
- ```bash
- poetry run pytest -rP .
- ```
+## Advanced
-5. **Run examples**:
- ```bash
- python -u examples/listen/v2/connect/main.py
- ```
+### Access Raw Response Data
-### Contribution Guidelines
+The SDK provides access to raw response data, including headers, through the `.with_raw_response` property.
+The `.with_raw_response` property returns a "raw" client that can be used to access the `.headers` and `.data` attributes.
-See our [CONTRIBUTING](./CONTRIBUTING.md) guide.
+```python
+from deepgram import DeepgramClient
-### Requirements
+client = DeepgramClient(
+ ...,
+)
+response = client.listen.v1.media.with_raw_response.transcribe_file(...)
+print(response.headers) # access the response headers
+print(response.data) # access the underlying object
+```
+
+### Retries
+
+The SDK is instrumented with automatic retries with exponential backoff. A request will be retried as long
+as the request is deemed retryable and the number of retry attempts has not grown larger than the configured
+retry limit (default: 2).
-- Python 3.8+
-- See `pyproject.toml` for full dependency list
+A request is deemed retryable when any of the following HTTP status codes is returned:
+- [408](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/408) (Timeout)
+- [429](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429) (Too Many Requests)
+- [5XX](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500) (Internal Server Errors)
+
+Use the `max_retries` request option to configure this behavior.
+
+```python
+client.listen.v1.media.transcribe_file(..., request_options={
+ "max_retries": 1
+})
+```
+
+### Timeouts
+
+The SDK defaults to a 60 second timeout. You can configure this with a timeout option at the client or request level.
+
+```python
+
+from deepgram import DeepgramClient
+
+client = DeepgramClient(
+ ...,
+ timeout=20.0,
+)
+
+
+# Override timeout for a specific method
+client.listen.v1.media.transcribe_file(..., request_options={
+ "timeout_in_seconds": 1
+})
+```
+
+### Custom Client
+
+You can override the `httpx` client to customize it for your use-case. Some common use-cases include support for proxies
+and transports.
+
+```python
+import httpx
+from deepgram import DeepgramClient
+
+client = DeepgramClient(
+ ...,
+ httpx_client=httpx.Client(
+ proxy="http://my.test.proxy.example.com",
+ transport=httpx.HTTPTransport(local_address="0.0.0.0"),
+ ),
+)
+```
+
+## Contributing
+
+While we value open-source contributions to this SDK, this library is generated programmatically.
+Additions made directly to this library would have to be moved over to our generation code,
+otherwise they would be overwritten upon the next generated release. Feel free to open a PR as
+a proof of concept, but know that we will not be able to merge it as-is. We suggest opening
+an issue first to discuss with us!
+
+On the other hand, contributions to the README are always very welcome!
## Community Code of Conduct
Please see our community [code of conduct](https://developers.deepgram.com/code-of-conduct) before contributing to this project.
@@ -397,3 +354,4 @@ Please see our community [code of conduct](https://developers.deepgram.com/code-
## License
This project is licensed under the MIT License - see the [LICENSE](./LICENSE) file for details.
+
diff --git a/docs/Migrating-v2-to-v3.md b/docs/Migrating-v2-to-v3.md
deleted file mode 100644
index 1ed9e4d7..00000000
--- a/docs/Migrating-v2-to-v3.md
+++ /dev/null
@@ -1,529 +0,0 @@
-# v2 to v3+ Migration Guide
-
-This guide helps you migrate from Deepgram Python SDK v2 to v3+ (versions 3.0.0 and above). The v3+ release introduces significant improvements including better structure, sync/async support, improved error handling, and support for future products.
-
-## Table of Contents
-
-- [Installation](#installation)
-- [Configuration Changes](#configuration-changes)
-- [API Method Changes](#api-method-changes)
- - [Listen V1](#listen-v1)
- - [Manage V1](#manage-v1)
-- [Breaking Changes Summary](#breaking-changes-summary)
-
-## Installation
-
-The package name remains the same:
-
-```bash
-pip install deepgram-sdk
-```
-
-To upgrade from v2 to v3+:
-
-```bash
-pip install --upgrade deepgram-sdk
-```
-
-## Configuration Changes
-
-### v2 Client Initialization
-
-```python
-from deepgram import Deepgram
-
-# Your Deepgram API Key
-DEEPGRAM_API_KEY = 'YOUR_DEEPGRAM_API_KEY'
-
-# Initialize the Deepgram SDK
-deepgram = Deepgram(DEEPGRAM_API_KEY)
-```
-
-### v3+ Client Initialization
-
-```python
-from deepgram import DeepgramClient
-
-# Create a Deepgram client using the DEEPGRAM_API_KEY from environment variables
-deepgram = DeepgramClient()
-
-# Or with explicit API key
-deepgram = DeepgramClient(api_key="YOUR_API_KEY")
-```
-
-## API Method Changes
-
-### Listen V1
-
-#### Transcribe File
-
-**v2**
-
-```python
-FILE = 'interview_speech-analytics.wav'
-
-# Open the audio file
-audio = open(FILE, 'rb')
-
-# Set the source
-source = {
- 'buffer': audio,
-}
-
-# Send the audio to Deepgram and get the response
-response = await asyncio.create_task(
- deepgram.transcription.prerecorded(
- source,
- {
- 'smart_format': "true",
- 'summarize': "v2",
- }
- )
-)
-
-# Write the response to the console
-print(json.dumps(response, indent=4))
-```
-
-**v3+**
-
-```python
-from deepgram import PrerecordedOptions, FileSource
-
-AUDIO_FILE = "preamble.wav"
-
-# Call the transcribe_file method on the prerecorded class
-with open(AUDIO_FILE, "rb") as file:
- buffer_data = file.read()
-
-payload: FileSource = {
- "buffer": buffer_data,
-}
-
-options = PrerecordedOptions(
- smart_format=True,
- summarize="v2",
-)
-file_response = deepgram.listen.rest.v("1").transcribe_file(payload, options)
-
-json = file_response.to_json()
-print(f"{json}")
-```
-
-#### Transcribe URL
-
-**v2**
-
-```python
-URL = 'https://static.deepgram.com/examples/interview_speech-analytics.wav'
-
-# Set the source
-source = {
- 'url': URL,
-}
-
-# Send the audio to Deepgram and get the response
-response = await asyncio.create_task(
- deepgram.transcription.prerecorded(
- source,
- {
- 'smart_format': "true",
- 'summarize': "v2",
- }
- )
-)
-
-# Write the response to the console
-print(json.dumps(response, indent=4))
-```
-
-**v3+**
-
-```python
-from deepgram import PrerecordedOptions, UrlSource
-
-AUDIO_URL = {
- "url": "https://static.deepgram.com/examples/Bueller-Life-moves-pretty-fast.wav"
-}
-
-options = PrerecordedOptions(
- smart_format=True,
- summarize="v2",
-)
-url_response = deepgram.listen.rest.v("1").transcribe_url(AUDIO_URL, options)
-
-json = url_response.to_json()
-print(f"{json}")
-```
-
-#### WebSocket Streaming (Listen V1)
-
-**v2**
-
-```python
-try:
- deepgramLive = await deepgram.transcription.live({
- 'smart_format': True,
- 'interim_results': False,
- 'language': 'en-US',
- 'model': 'nova-3',
- })
-except Exception as e:
- print(f'Could not open socket: {e}')
- return
-
-# Listen for the connection to close
-deepgramLive.registerHandler(deepgramLive.event.CLOSE, lambda c: print(
- f'Connection closed with code {c}.'))
-
-# Listen for any transcripts received from Deepgram and write them to the console
-deepgramLive.registerHandler(deepgramLive.event.TRANSCRIPT_RECEIVED, print)
-
-# Listen for the connection to open and send streaming audio from the URL to Deepgram
-async with aiohttp.ClientSession() as session:
- async with session.get(URL) as audio:
- while True:
- data = await audio.content.readany()
- deepgramLive.send(data)
-
- # If no data is being sent from the live stream, then break out of the loop.
- if not data:
- break
-
-# Indicate that we've finished sending data
-await deepgramLive.finish()
-```
-
-**v3+**
-
-```python
-import threading
-import httpx
-from deepgram import DeepgramClient, LiveOptions, LiveTranscriptionEvents
-
-try:
- deepgram: DeepgramClient = DeepgramClient()
-
- dg_connection = deepgram.listen.websocket.v("1")
-
- # define callbacks for transcription messages
- def on_message(self, result, **kwargs):
- sentence = result.channel.alternatives[0].transcript
- if len(sentence) == 0:
- return
- print(f"speaker: {sentence}")
-
- dg_connection.on(LiveTranscriptionEvents.Transcript, on_message)
-
- # connect to websocket
- options = LiveOptions(model="nova-3", interim_results=False, language="en-US")
- dg_connection.start(options)
-
- lock_exit = threading.Lock()
- exit = False
-
- # define a worker thread
- def myThread():
- with httpx.stream("GET", URL) as r:
- for data in r.iter_bytes():
- lock_exit.acquire()
- if exit:
- break
- lock_exit.release()
-
- dg_connection.send(data)
-
- # start the worker thread
- myHttp = threading.Thread(target=myThread)
- myHttp.start()
-
- # signal finished
- input("Press Enter to stop recording...\n\n")
- lock_exit.acquire()
- exit = True
- lock_exit.release()
-
- # Wait for the HTTP thread to close and join
- myHttp.join()
-
- # Indicate that we've finished
- dg_connection.finish()
-
-except Exception as e:
- print(f"Could not open socket: {e}")
- return
-```
-
-### Manage V1
-
-#### Projects
-
-**v2**
-
-```python
-# Get projects
-result = await deepgram.projects.list()
-
-# Get project
-result = await deepgram.projects.get("550e8400-e29b-41d4-a716-446655440000")
-
-# Update project
-result = await deepgram.projects.update(object)
-
-# Delete project
-result = await deepgram.projects.delete("550e8400-e29b-41d4-a716-446655440000")
-```
-
-**v3+**
-
-```python
-# Get projects
-result = deepgram.manage.v("1").get_projects()
-
-# Get project
-result = deepgram.manage.v("1").get_project("550e8400-e29b-41d4-a716-446655440000")
-
-# Update project
-result = deepgram.manage.v("1").update_project("550e8400-e29b-41d4-a716-446655440000", name="My TEST RENAME Example")
-
-# Delete project
-result = deepgram.manage.v("1").delete_project("550e8400-e29b-41d4-a716-446655440000")
-```
-
-#### Keys
-
-**v2**
-
-```python
-# List keys
-result = await deepgram.keys.list("550e8400-e29b-41d4-a716-446655440000")
-
-# Get key
-result = await deepgram.keys.get("550e8400-e29b-41d4-a716-446655440000", "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
-# Create key
-result = await deepgram.keys.create("550e8400-e29b-41d4-a716-446655440000", "MyTestKey", ["member"])
-
-# Delete key
-result = await deepgram.keys.delete("550e8400-e29b-41d4-a716-446655440000", "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v3+**
-
-```python
-from deepgram import KeyOptions
-
-# List keys
-result = deepgram.manage.v("1").get_keys("550e8400-e29b-41d4-a716-446655440000")
-
-# Get key
-result = deepgram.manage.v("1").get_key("550e8400-e29b-41d4-a716-446655440000", "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
-# Create key
-options = KeyOptions(
- comment="MyTestKey",
- scopes=["member"],
-)
-result = deepgram.manage.v("1").create_key("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Delete key
-result = deepgram.manage.v("1").delete_key("550e8400-e29b-41d4-a716-446655440000", "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-```
-
-#### Members
-
-**v2**
-
-```python
-# Get members
-result = await deepgram.members.list_members("550e8400-e29b-41d4-a716-446655440000")
-
-# Remove member
-result = await deepgram.members.remove_member("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v3+**
-
-```python
-# Get members
-result = deepgram.manage.v("1").get_members("550e8400-e29b-41d4-a716-446655440000")
-
-# Remove member
-result = deepgram.manage.v("1").remove_member("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-```
-
-#### Scopes
-
-**v2**
-
-```python
-# Get member scopes
-result = await deepgram.scopes.get_scope("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-
-# Update scope
-result = await deepgram.scopes.update_scope("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8", 'member')
-```
-
-**v3+**
-
-```python
-from deepgram import ScopeOptions
-
-# Get member scopes
-result = deepgram.manage.v("1").get_member_scopes("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-
-# Update scope
-options = ScopeOptions(
- scope="admin"
-)
-result = deepgram.manage.v("1").update_member_scope("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8", options)
-```
-
-#### Invitations
-
-**v2**
-
-```python
-# List invites
-result = await deepgram.invitations.list_invitations("550e8400-e29b-41d4-a716-446655440000")
-
-# Send invite
-result = await deepgram.invitations.send_invitation("550e8400-e29b-41d4-a716-446655440000", {
- 'email': 'hello@deepgram.com',
- 'scope': 'member',
-})
-
-# Delete invite
-result = await deepgram.invitations.remove_invitation("550e8400-e29b-41d4-a716-446655440000", 'hello@deepgram.com')
-
-# Leave project
-result = await deepgram.invitation.leave_project("550e8400-e29b-41d4-a716-446655440000")
-```
-
-**v3+**
-
-```python
-from deepgram import InviteOptions
-
-# List invites
-result = deepgram.manage.v("1").get_invites("550e8400-e29b-41d4-a716-446655440000")
-
-# Send invite
-options = InviteOptions(
- email="hello@deepgram.com",
- scope="member"
-)
-result = deepgram.manage.v("1").send_invite_options("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Delete invite
-result = deepgram.manage.v("1").delete_invite("550e8400-e29b-41d4-a716-446655440000", "hello@deepgram.com")
-
-# Leave project
-result = deepgram.manage.v("1").leave_project("550e8400-e29b-41d4-a716-446655440000")
-```
-
-#### Usage
-
-**v2**
-
-```python
-# Get all requests
-result = await deepgram.usage.list_requests("550e8400-e29b-41d4-a716-446655440000", {
- 'limit': 10,
- # other options are available
-})
-
-# Get request
-result = await deepgram.usage.get_request("550e8400-e29b-41d4-a716-446655440000", "6ba7b812-9dad-11d1-80b4-00c04fd430c8")
-
-# Get usage summary
-result = await deepgram.usage.get_usage("550e8400-e29b-41d4-a716-446655440000", {
- 'start': '2020-01-01T00:00:00+00:00',
- # other options are available
-})
-
-# Get usage fields
-result = await deepgram.usage.get_fields("550e8400-e29b-41d4-a716-446655440000", {
- 'start': '2020-01-01T00:00:00+00:00',
- # other options are available
-})
-```
-
-**v3+**
-
-```python
-# Get all requests
-result = deepgram.manage.v("1").get_usage_requests("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Get request
-result = deepgram.manage.v("1").get_usage_request("550e8400-e29b-41d4-a716-446655440000", "6ba7b812-9dad-11d1-80b4-00c04fd430c8")
-
-# Get usage summary
-result = deepgram.manage.v("1").get_usage_summary("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Get usage fields
-result = deepgram.manage.v("1").get_usage_fields("550e8400-e29b-41d4-a716-446655440000", options)
-```
-
-#### Billing
-
-**v2**
-
-```python
-# Get all balances
-result = await deepgram.billing.list_balance("550e8400-e29b-41d4-a716-446655440000")
-
-# Get balance
-result = await deepgram.billing.get_balance("550e8400-e29b-41d4-a716-446655440000", "6ba7b813-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v3+**
-
-```python
-# Get all balances
-result = deepgram.manage.v("1").get_balances("550e8400-e29b-41d4-a716-446655440000")
-
-# Get balance
-result = deepgram.manage.v("1").get_balance("550e8400-e29b-41d4-a716-446655440000", "6ba7b813-9dad-11d1-80b4-00c04fd430c8")
-```
-
-## Breaking Changes Summary
-
-### Major Changes
-
-1. **SDK Structure**: Complete restructure with improved organization
-2. **Client Initialization**: New `DeepgramClient` class with environment variable support
-3. **API Structure**: New versioned API structure with `v("1")` pattern
-4. **Sync/Async Support**: Both synchronous and asynchronous classes and methods
-5. **Options Objects**: New typed options objects for better parameter management
-6. **WebSocket Implementation**: Improved live client with better abstractions
-7. **Error Handling**: Enhanced error handling and logging capabilities
-
-### Removed Features
-
-- Old `Deepgram` client class (replaced with `DeepgramClient`)
-- Direct async/await methods on main client (moved to versioned structure)
-- Old event handling system (replaced with new event system)
-
-### New Features in v3+
-
-- **Improved Live Client**: Better WebSocket abstractions
-- **Verbosity Logging**: Enhanced logging levels for troubleshooting
-- **Custom Headers/Query Parameters**: Support for custom API parameters
-- **Future Product Support**: Architecture ready for new APIs
-- **Better Type Safety**: Typed options objects and responses
-
-### Migration Checklist
-
-- [ ] Upgrade to latest version: `pip install --upgrade deepgram-sdk`
-- [ ] Replace `Deepgram` with `DeepgramClient`
-- [ ] Update API method calls to new versioned structure
-- [ ] Replace direct parameters with options objects
-- [ ] Update WebSocket event handling to new system
-- [ ] Update error handling for new exception types
-- [ ] Test all functionality with new API structure
-
-### Notes
-
-- WebVTT and SRT captions are now available as a standalone package: [deepgram-python-captions](https://github.com/deepgram/deepgram-python-captions)
-- Self-hosted API functionality remains unchanged but may have breaking changes in v4
diff --git a/docs/Migrating-v3-to-v5.md b/docs/Migrating-v3-to-v5.md
deleted file mode 100644
index bcaaab2d..00000000
--- a/docs/Migrating-v3-to-v5.md
+++ /dev/null
@@ -1,929 +0,0 @@
-# v3+ to v5 Migration Guide
-
-This guide helps you migrate from Deepgram Python SDK v3+ (versions 3.0.0 to 4.8.1) to v5.0.0. The v5 release introduces significant improvements including better type safety, cleaner API design, and enhanced WebSocket functionality.
-
-## Table of Contents
-
-- [Installation Changes](#installation-changes)
-- [Configuration Changes](#configuration-changes)
-- [Authentication Changes](#authentication-changes)
-- [API Method Changes](#api-method-changes)
- - [Auth V1](#auth-v1)
- - [Listen V1](#listen-v1)
- - [Speak V1](#speak-v1)
- - [Agent V1](#agent-v1)
- - [Read V1](#read-v1)
- - [Models V1](#models-v1)
- - [Manage V1](#manage-v1)
- - [Self-Hosted V1](#self-hosted-v1)
-- [Keep Alive Functionality](#websocket-keep-alive-functionality)
-- [Breaking Changes Summary](#breaking-changes-summary)
-
-## Installation
-
-To upgrade from v3+ to v5.0.0:
-
-```bash
-pip install --upgrade deepgram-sdk
-```
-
-## Configuration Changes
-
-### v3+ Client Initialization
-
-```python
-from deepgram import DeepgramClient
-
-# Basic initialization
-deepgram = DeepgramClient("YOUR_API_KEY")
-
-# With configuration
-from deepgram import DeepgramClientOptions
-config = DeepgramClientOptions(api_key="your-api-key")
-client = DeepgramClient(config=config)
-```
-
-### v5.0.0 Client Initialization
-
-```python
-from deepgram import DeepgramClient
-
-# API key authentication (server-side)
-client = DeepgramClient(api_key="YOUR_API_KEY")
-
-# Access token authentication (recommended for client-side)
-client = DeepgramClient(access_token="YOUR_ACCESS_TOKEN")
-
-# Environment variable authentication
-# Set DEEPGRAM_API_KEY or DEEPGRAM_TOKEN
-client = DeepgramClient()
-
-# With custom HTTP client
-import httpx
-client = DeepgramClient(
- httpx_client=httpx.Client(
- proxies="http://proxy.example.com",
- timeout=httpx.Timeout(30.0)
- )
-)
-```
-
-## Authentication Changes
-
-### Environment Variables
-
-- **v3+**: `DEEPGRAM_API_KEY`
-- **v5.0.0**: `DEEPGRAM_TOKEN` (takes precedence) or `DEEPGRAM_API_KEY`
-
-### Authentication Priority (v5.0.0)
-
-1. Explicit `access_token` parameter (highest priority)
-2. Explicit `api_key` parameter
-3. `DEEPGRAM_TOKEN` environment variable
-4. `DEEPGRAM_API_KEY` environment variable (lowest priority)
-
-## API Method Changes
-
-### Auth V1
-
-#### Grant Token
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-response = deepgram.auth.v("1").grant_token()
-```
-
-**v5.0.0**
-
-```python
-response = client.auth.v1.tokens.grant()
-
-# With custom TTL
-response = client.auth.v1.tokens.grant(ttl_seconds=60)
-```
-
-### Listen V1
-
-#### Response Types
-
-In v5.0.0, there are two types of responses for transcription requests:
-
-1. **Synchronous Response**: When no callback is provided, returns the full transcription result immediately
-2. **Asynchronous Response**: When a callback URL is provided, returns a "listen accepted" response and sends the actual transcription to the callback URL
-
-#### Transcribe URL
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import PrerecordedOptions, UrlSource
-
-payload: UrlSource = {
- "url": "https://dpgr.am/spacewalk.wav"
-}
-
-options = PrerecordedOptions(model="nova-3")
-
-response = deepgram.listen.rest.v("1").transcribe_url(
- payload,
- options
-)
-```
-
-**v5.0.0**
-
-```python
-# Returns the full transcription result immediately (synchronous)
-response = client.listen.v1.media.transcribe_url(
- url="https://dpgr.am/spacewalk.wav",
- model="nova-3"
-)
-```
-
-#### Transcribe File
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import PrerecordedOptions, FileSource
-
-with open("path/to/your/audio.wav", "rb") as file:
- buffer_data = file.read()
-
-payload: FileSource = {
- "buffer": buffer_data,
-}
-
-options = PrerecordedOptions(model="nova-3")
-
-response = deepgram.listen.rest.v("1").transcribe_file(
- payload,
- options
-)
-```
-
-**v5.0.0**
-
-```python
-# Returns the full transcription result immediately (synchronous)
-with open("audio.wav", "rb") as audio_file:
- response = client.listen.v1.media.transcribe_file(
- request=audio_file.read(),
- model="nova-3"
- )
-```
-
-#### Transcribe URL with Callback (Asynchronous)
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-response = deepgram.listen.rest.v("1").transcribe_url_callback(
- payload,
- "https://your-callback-url.com/webhook",
- options=options
-)
-```
-
-**v5.0.0**
-
-```python
-# Returns a listen accepted response (not the full transcription)
-response = client.listen.v1.media.transcribe_url(
- url="https://dpgr.am/spacewalk.wav",
- callback="https://your-callback-url.com/webhook",
- model="nova-3"
-)
-# The actual transcription will be sent to the callback URL
-```
-
-#### Transcribe File with Callback (Asynchronous)
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-response = deepgram.listen.rest.v("1").transcribe_file_callback(
- payload,
- "https://your-callback-url.com/webhook",
- options=options
-)
-```
-
-**v5.0.0**
-
-```python
-# Returns a listen accepted response (not the full transcription)
-with open("audio.wav", "rb") as audio_file:
- response = client.listen.v1.media.transcribe_file(
- request=audio_file.read(),
- callback="https://your-callback-url.com/webhook",
- model="nova-3"
- )
-# The actual transcription will be sent to the callback URL
-```
-
-#### WebSocket Streaming (Listen V1)
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import LiveOptions, LiveTranscriptionEvents
-
-connection = deepgram.listen.websocket.v("1")
-
-@connection.on(LiveTranscriptionEvents.Transcript)
-def handle_transcript(result):
- print(result.channel.alternatives[0].transcript)
-
-connection.start(LiveOptions(model="nova-3", language="en-US"))
-connection.send(open("path/to/your/audio.wav", "rb").read())
-connection.finish()
-```
-
-**v5.0.0**
-
-```python
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-with client.listen.v1.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Error: {error}"))
-
- connection.start_listening()
-
- # Read and send audio data
- with open("path/to/your/audio.wav", "rb") as audio_file:
- audio_bytes = audio_file.read()
- from deepgram.extensions.types.sockets import ListenV1MediaMessage
- connection.send_media(ListenV1MediaMessage(audio_bytes))
-
-```
-
-#### WebSocket Streaming (Listen V2 - New in v5.0.0)
-
-**v5.0.0**
-
-```python
-with client.listen.v2.connect(
- model="flux-general-en",
- encoding="linear16",
- sample_rate="16000"
-) as connection:
- def on_message(message):
- print(f"Received {message.type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Error: {error}"))
-
- connection.start_listening()
-```
-
-### Speak V1
-
-#### Generate Audio (REST)
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import SpeakOptions
-
-options = SpeakOptions(model="aura-2-thalia-en")
-
-response = deepgram.speak.rest.v("1").save(
- "output.mp3",
- {"text": "Hello world!"},
- options
-)
-```
-
-**v5.0.0**
-
-```python
-response = client.speak.v1.audio.generate(
- text="Hello, this is a sample text to speech conversion.",
- model="aura-2-asteria-en"
-)
-
-# Save the audio file
-with open("output.mp3", "wb") as audio_file:
- audio_file.write(response.stream.getvalue())
-```
-
-#### WebSocket Streaming (Speak V1)
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import (
- SpeakWSOptions,
- SpeakWebSocketEvents
-)
-
-connection = deepgram.speak.websocket.v("1")
-
-@connection.on(SpeakWebSocketEvents.AudioData)
-def handle_audio_data(data):
- # Handle audio data
- pass
-
-options = SpeakWSOptions(
- model="aura-2-thalia-en",
- encoding="linear16",
- sample_rate=16000
-)
-
-connection.start(options)
-connection.send_text("Hello, this is a text to speech example.")
-connection.flush()
-connection.wait_for_complete()
-connection.finish()
-```
-
-**v5.0.0**
-
-```python
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-with client.speak.v1.connect(
- model="aura-2-asteria-en",
- encoding="linear16",
- sample_rate=24000
-) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Error: {error}"))
-
- connection.start_listening()
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1TextMessage
- connection.send_text(SpeakV1TextMessage(text="Hello, world!"))
-
- # Send control messages
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- connection.send_control(SpeakV1ControlMessage(type="Flush"))
- connection.send_control(SpeakV1ControlMessage(type="Close"))
-```
-
-### Agent V1
-
-#### Voice Agent Configuration
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import (
- SettingsOptions,
- Speak
-)
-
-connection = deepgram.agent.websocket.v("1")
-
-options = SettingsOptions()
-options.language = "en"
-options.agent.think.provider.type = "open_ai"
-options.agent.think.provider.model = "gpt-4o-mini"
-options.agent.think.prompt = "You are a helpful AI assistant."
-options.agent.listen.provider.type = "deepgram"
-options.agent.listen.provider.model = "nova-3"
-
-primary = Speak()
-primary.provider.type = "deepgram"
-primary.provider.model = "aura-2-zeus-en"
-
-options.agent.speak = [primary]
-options.greeting = "Hello, I'm your AI assistant."
-
-connection.start(options)
-```
-
-**v5.0.0**
-
-```python
-from deepgram.extensions.types.sockets import (
- AgentV1SettingsMessage, AgentV1Agent, AgentV1AudioConfig,
- AgentV1AudioInput, AgentV1Listen, AgentV1ListenProvider,
- AgentV1Think, AgentV1OpenAiThinkProvider, AgentV1SpeakProviderConfig,
- AgentV1DeepgramSpeakProvider
-)
-
-with client.agent.v1.connect() as agent:
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(encoding="linear16", sample_rate=44100)
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(type="deepgram", model="nova-3")
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai", model="gpt-4o-mini"
- )
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram", model="aura-2-asteria-en"
- )
- )
- )
- )
-
- agent.send_settings(settings)
- agent.start_listening()
-```
-
-### Read V1
-
-#### Text Analysis
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-from deepgram import AnalyzeOptions, TextSource
-
-options = AnalyzeOptions(
- sentiment=True,
- intents=True,
- topics=True,
- summarize=True
-)
-
-payload: TextSource = {
- "buffer": "The quick brown fox jumps over the lazy dog."
-}
-
-response = deepgram.read.analyze.v("1").analyze_text(
- payload,
- options
-)
-```
-
-**v5.0.0**
-
-```python
-response = client.read.v1.text.analyze(
- request={"text": "Hello, world!"},
- language="en",
- sentiment=True,
- summarize=True,
- topics=True,
- intents=True
-)
-```
-
-### Models V1
-
-#### List Models
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Not available in v3+
-```
-
-**v5.0.0**
-
-```python
-response = client.manage.v1.models.list()
-
-# Include outdated models
-response = client.manage.v1.models.list(include_outdated=True)
-```
-
-#### Get Model
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Not available in v3+
-```
-
-**v5.0.0**
-
-```python
-response = client.manage.v1.models.get(
- model_id="6ba7b814-9dad-11d1-80b4-00c04fd430c8"
-)
-```
-
-### Manage V1
-
-#### Projects
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Get projects
-response = deepgram.manage.v("1").get_projects()
-
-# Get project
-response = deepgram.manage.v("1").get_project("550e8400-e29b-41d4-a716-446655440000")
-
-# Update project
-response = deepgram.manage.v("1").update_project("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Delete project
-response = deepgram.manage.v("1").delete_project("550e8400-e29b-41d4-a716-446655440000")
-```
-
-**v5.0.0**
-
-```python
-# Get projects
-response = client.manage.v1.projects.list()
-
-# Get project
-response = client.manage.v1.projects.get(project_id="550e8400-e29b-41d4-a716-446655440000")
-
-# Update project
-response = client.manage.projects.update(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- name="New Project Name"
-)
-
-# Delete project
-response = client.manage.projects.delete(project_id="550e8400-e29b-41d4-a716-446655440000")
-```
-
-#### Keys
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# List keys
-response = deepgram.manage.v("1").get_keys("550e8400-e29b-41d4-a716-446655440000")
-
-# Get key
-response = deepgram.manage.v("1").get_key("550e8400-e29b-41d4-a716-446655440000", "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
-# Create key
-response = deepgram.manage.v("1").create_key("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Delete key
-response = deepgram.manage.v("1").delete_key("550e8400-e29b-41d4-a716-446655440000", "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v5.0.0**
-
-```python
-# List keys
-response = client.manage.v1.projects.keys.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get key
-response = client.manage.v1.projects.keys.get(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- key_id="6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-)
-
-# Create key
-response = client.manage.projects.keys.create(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- request={"key": "value"}
-)
-
-# Delete key
-response = client.manage.projects.keys.delete(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- key_id="6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-)
-```
-
-#### Members
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Get members
-response = deepgram.manage.v("1").get_members("550e8400-e29b-41d4-a716-446655440000")
-
-# Remove member
-response = deepgram.manage.v("1").remove_member("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v5.0.0**
-
-```python
-# Get members
-response = client.manage.v1.projects.members.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Remove member
-response = client.manage.v1.projects.members.delete(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- member_id="6ba7b811-9dad-11d1-80b4-00c04fd430c8"
-)
-```
-
-#### Scopes
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Get member scopes
-response = deepgram.manage.v("1").get_member_scopes("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-
-# Update scope
-response = deepgram.manage.v("1").update_member_scope("550e8400-e29b-41d4-a716-446655440000", "6ba7b811-9dad-11d1-80b4-00c04fd430c8", options)
-```
-
-**v5.0.0**
-
-```python
-# Get member scopes
-response = client.manage.v1.projects.members.scopes.list(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- member_id="6ba7b811-9dad-11d1-80b4-00c04fd430c8"
-)
-
-# Update scope
-response = client.manage.projects.members.scopes.update(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- member_id="6ba7b811-9dad-11d1-80b4-00c04fd430c8",
- scope="admin"
-)
-```
-
-#### Invitations
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# List invites
-response = deepgram.manage.v("1").get_invites("550e8400-e29b-41d4-a716-446655440000")
-
-# Send invite
-response = deepgram.manage.v("1").send_invite("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Delete invite
-response = deepgram.manage.v("1").delete_invite("550e8400-e29b-41d4-a716-446655440000", "hello@deepgram.com")
-
-# Leave project
-response = deepgram.manage.v("1").leave_project("550e8400-e29b-41d4-a716-446655440000")
-```
-
-**v5.0.0**
-
-```python
-# List invites
-response = client.manage.v1.projects.members.invites.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Send invite
-response = client.manage.v1.projects.members.invites.create(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- email="hello@deepgram.com",
- scope="member"
-)
-
-# Delete invite
-response = client.manage.v1.projects.members.invites.delete(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- email="hello@deepgram.com"
-)
-
-# Leave project
-response = client.manage.v1.projects.leave(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-```
-
-#### Usage
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Get all requests
-response = deepgram.manage.v("1").get_usage_requests("550e8400-e29b-41d4-a716-446655440000")
-
-# Get request
-response = deepgram.manage.v("1").get_usage_request("550e8400-e29b-41d4-a716-446655440000", "6ba7b812-9dad-11d1-80b4-00c04fd430c8")
-
-# Get fields
-response = deepgram.manage.v("1").get_usage_fields("550e8400-e29b-41d4-a716-446655440000")
-
-# Summarize usage
-response = deepgram.manage.v("1").get_usage_summary("550e8400-e29b-41d4-a716-446655440000")
-```
-
-**v5.0.0**
-
-```python
-# Get all requests
-response = client.manage.v1.projects.requests.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get request
-response = client.manage.v1.projects.requests.get(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- request_id="6ba7b812-9dad-11d1-80b4-00c04fd430c8"
-)
-
-# Get fields
-response = client.manage.v1.projects.usage.fields.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get usage summary
-response = client.manage.v1.projects.usage.get(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get usage breakdown (new in v5)
-response = client.manage.v1.projects.usage.breakdown.get(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-```
-
-#### Billing
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Get all balances
-response = deepgram.manage.v("1").get_balances("550e8400-e29b-41d4-a716-446655440000")
-
-# Get balance
-response = deepgram.manage.v("1").get_balance("550e8400-e29b-41d4-a716-446655440000", "6ba7b813-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v5.0.0**
-
-```python
-# Get all balances
-response = client.manage.v1.projects.balances.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get balance
-response = client.manage.v1.projects.balances.get(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- balance_id="6ba7b813-9dad-11d1-80b4-00c04fd430c8"
-)
-```
-
-#### Models (Project-specific)
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Get all project models
-response = deepgram.manage.v("1").get_project_models("550e8400-e29b-41d4-a716-446655440000")
-
-# Get model
-response = deepgram.manage.v("1").get_project_model("550e8400-e29b-41d4-a716-446655440000", "6ba7b814-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v5.0.0**
-
-```python
-# Get all project models
-response = client.manage.v1.projects.models.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get model
-response = client.manage.v1.projects.models.get(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- model_id="6ba7b814-9dad-11d1-80b4-00c04fd430c8"
-)
-```
-
-### Self-Hosted V1
-
-#### Distribution Credentials
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# List credentials
-response = deepgram.selfhosted.v("1").list_selfhosted_credentials("550e8400-e29b-41d4-a716-446655440000")
-
-# Get credentials
-response = deepgram.selfhosted.v("1").get_selfhosted_credentials("550e8400-e29b-41d4-a716-446655440000", "6ba7b815-9dad-11d1-80b4-00c04fd430c8")
-
-# Create credentials
-response = deepgram.selfhosted.v("1").create_selfhosted_credentials("550e8400-e29b-41d4-a716-446655440000", options)
-
-# Delete credentials
-response = deepgram.selfhosted.v("1").delete_selfhosted_credentials("550e8400-e29b-41d4-a716-446655440000", "6ba7b815-9dad-11d1-80b4-00c04fd430c8")
-```
-
-**v5.0.0**
-
-```python
-# List credentials
-response = client.self_hosted.v1.distribution_credentials.list(
- project_id="550e8400-e29b-41d4-a716-446655440000"
-)
-
-# Get credentials
-response = client.self_hosted.v1.distribution_credentials.get(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- distribution_credentials_id="6ba7b815-9dad-11d1-80b4-00c04fd430c8"
-)
-
-# Create credentials
-response = client.self_hosted.v1.distribution_credentials.create(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- scopes=["read", "write"],
- provider="quay",
- comment="Development credentials"
-)
-
-# Delete credentials
-response = client.self_hosted.v1.distribution_credentials.delete(
- project_id="550e8400-e29b-41d4-a716-446655440000",
- distribution_credentials_id="6ba7b815-9dad-11d1-80b4-00c04fd430c8"
-)
-```
-
-## WebSocket Keep Alive Functionality
-
-**v3+ (3.0.0 - 4.8.1)**
-
-```python
-# Keep alive was passed as a config option
-config = DeepgramClientOptions(
- options={"keepalive": "true"}
-)
-deepgram = DeepgramClient(API_KEY, config)
-```
-
-**v5.0.0**
-
-```python
-# Keep alive is now manually managed via control messages.
-from deepgram.extensions.types.sockets import ListenV1ControlMessage, AgentV1ControlMessage
-
-# For Listen V1 connections
-with client.listen.v1.connect(model="nova-3") as connection:
- # Send keep alive message
- connection.send_control(ListenV1ControlMessage(type="KeepAlive"))
-
-# For Agent V1 connections
-with client.agent.v1.connect() as agent:
- # Send keep alive message
- agent.send_control(AgentV1ControlMessage(type="KeepAlive"))
-```
-
-## Breaking Changes Summary
-
-### Major Changes
-
-1. **Authentication**: New access token support with environment variable `DEEPGRAM_TOKEN`
-2. **API structure**: Flattened method names and cleaner parameter passing
-3. **WebSocket API**: Complete redesign with context managers and typed message objects
-4. **WebSocket Keep Alive**: Managed via control messages, no longer an automatic thing via config
-5. **Type safety**: Enhanced type annotations and response objects
-6. **Error handling**: Improved error types and handling
-
-### Removed Features
-
-- Custom configuration objects (replaced with direct parameters)
-- String-based versioning (`v("1")` β `v1`)
-- Separate callback methods (integrated into main methods)
-- Legacy WebSocket event system
-
-### New Features in v5.0.0
-
-- **Listen V2**: Advanced conversational speech recognition with contextual turn detection
-- **Enhanced Agent V1**: More flexible voice agent configuration
-- **Raw response access**: Access to HTTP headers and raw response data
-- **Custom HTTP client**: Support for custom httpx clients
-- **Usage breakdown**: Detailed usage analytics
-- **Better async support**: Full async/await support throughout
-
-### Migration Checklist
-
-- [ ] Upgrade to latest version: `pip install --upgrade deepgram-sdk`
-- [ ] Update import statements if needed
-- [ ] Replace API key configuration with new authentication methods
-- [ ] Update all API method calls to new structure
-- [ ] Migrate WebSocket connections to new context manager pattern
-- [ ] Update WebSocket keep alive implementation
-- [ ] Update error handling for new exception types
-- [ ] Test all functionality with new API structure
diff --git a/examples/agent/v1/connect/async.py b/examples/agent/v1/connect/async.py
deleted file mode 100644
index 92574f85..00000000
--- a/examples/agent/v1/connect/async.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import (
- AgentV1Agent,
- AgentV1AudioConfig,
- AgentV1AudioInput,
- AgentV1DeepgramSpeakProvider,
- AgentV1Listen,
- AgentV1ListenProvider,
- AgentV1OpenAiThinkProvider,
- AgentV1SettingsMessage,
- AgentV1SocketClientResponse,
- AgentV1SpeakProviderConfig,
- AgentV1Think,
-)
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- async with client.agent.v1.connect() as agent:
- # Send minimal settings to configure the agent per the latest spec
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(
- encoding="linear16",
- sample_rate=16000,
- )
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(
- type="deepgram",
- model="nova-3",
- smart_format=True,
- )
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai",
- model="gpt-4o-mini",
- temperature=0.7,
- )
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram",
- model="aura-2-asteria-en",
- )
- ),
- ),
- )
-
- print("Send SettingsConfiguration message")
- await agent.send_settings(settings)
- def on_message(message: AgentV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- agent.on(EventType.OPEN, lambda _: print("Connection opened"))
- agent.on(EventType.MESSAGE, on_message)
- agent.on(EventType.CLOSE, lambda _: print("Connection closed"))
- agent.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening task and cancel after brief demo
- # In production, you would typically await agent.start_listening() directly
- # which runs until the connection closes or is interrupted
- listen_task = asyncio.create_task(agent.start_listening())
- await asyncio.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
- listen_task.cancel()
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/agent/v1/connect/main.py b/examples/agent/v1/connect/main.py
deleted file mode 100644
index 9cdd2270..00000000
--- a/examples/agent/v1/connect/main.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import (
- AgentV1Agent,
- AgentV1AudioConfig,
- AgentV1AudioInput,
- AgentV1DeepgramSpeakProvider,
- AgentV1Listen,
- AgentV1ListenProvider,
- AgentV1OpenAiThinkProvider,
- AgentV1SettingsMessage,
- AgentV1SocketClientResponse,
- AgentV1SpeakProviderConfig,
- AgentV1Think,
-)
-
-client = DeepgramClient()
-
-try:
- with client.agent.v1.connect() as agent:
- # Send minimal settings to configure the agent per the latest spec
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(
- encoding="linear16",
- sample_rate=44100,
- )
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(
- type="deepgram",
- model="nova-3",
- smart_format=True,
- )
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai",
- model="gpt-4o-mini",
- temperature=0.7,
- ),
- prompt='Reply only and explicitly with "OK".',
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram",
- model="aura-2-asteria-en",
- )
- ),
- ),
- )
-
- print("Send SettingsConfiguration message")
- agent.send_settings(settings)
-
- def on_message(message: AgentV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- agent.on(EventType.OPEN, lambda _: print("Connection opened"))
- agent.on(EventType.MESSAGE, on_message)
- agent.on(EventType.CLOSE, lambda _: print("Connection closed"))
- agent.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call agent.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=agent.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/agent/v1/connect/with_auth_token.py b/examples/agent/v1/connect/with_auth_token.py
deleted file mode 100644
index 5f593712..00000000
--- a/examples/agent/v1/connect/with_auth_token.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import (
- AgentV1Agent,
- AgentV1AudioConfig,
- AgentV1AudioInput,
- AgentV1DeepgramSpeakProvider,
- AgentV1Listen,
- AgentV1ListenProvider,
- AgentV1OpenAiThinkProvider,
- AgentV1SettingsMessage,
- AgentV1SocketClientResponse,
- AgentV1SpeakProviderConfig,
- AgentV1Think,
-)
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- with client.agent.v1.connect() as agent:
- # Send minimal settings to configure the agent per the latest spec
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(
- encoding="linear16",
- sample_rate=44100,
- )
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(
- type="deepgram",
- model="nova-3",
- smart_format=True,
- )
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai",
- model="gpt-4o-mini",
- temperature=0.7,
- ),
- prompt='Reply only and explicitly with "OK".',
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram",
- model="aura-2-asteria-en",
- )
- ),
- ),
- )
-
- print("Send SettingsConfiguration message")
- agent.send_settings(settings)
- def on_message(message: AgentV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- agent.on(EventType.OPEN, lambda _: print("Connection opened"))
- agent.on(EventType.MESSAGE, on_message)
- agent.on(EventType.CLOSE, lambda _: print("Connection closed"))
- agent.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call agent.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=agent.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/agent/v1/connect/with_raw_response.py b/examples/agent/v1/connect/with_raw_response.py
deleted file mode 100644
index 6295eb3d..00000000
--- a/examples/agent/v1/connect/with_raw_response.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import json # noqa: F401
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.extensions.types.sockets import (
- AgentV1Agent,
- AgentV1AudioConfig,
- AgentV1AudioInput,
- AgentV1DeepgramSpeakProvider,
- AgentV1Listen,
- AgentV1ListenProvider,
- AgentV1OpenAiThinkProvider,
- AgentV1SettingsMessage,
- AgentV1SpeakProviderConfig,
- AgentV1Think,
-)
-
-client = DeepgramClient()
-
-try:
- with client.agent.v1.with_raw_response.connect() as agent:
- # Send minimal settings to configure the agent per the latest spec
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(
- encoding="linear16",
- sample_rate=44100,
- )
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(
- type="deepgram",
- model="nova-3",
- smart_format=True,
- )
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai",
- model="gpt-4o-mini",
- temperature=0.7,
- ),
- prompt='Reply only and explicitly with "OK".',
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram",
- model="aura-2-asteria-en",
- )
- ),
- ),
- )
-
- # Send settings using raw method
- print("Send SettingsConfiguration message")
- agent._send_model(settings)
-
- # EXAMPLE ONLY: Manually read messages for demo purposes
- # In production, you would use the standard event handlers and start_listening()
- print("Connection opened")
- try:
- start = time.time()
- while time.time() - start < 3:
- raw = agent._websocket.recv() # type: ignore[attr-defined]
- if isinstance(raw, (bytes, bytearray)):
- print("Received audio event")
- continue
- try:
- data = json.loads(raw)
- msg_type = data.get("type", "Unknown")
- print(f"Received {msg_type} event")
- if msg_type == "AgentAudioDone":
- break
- except Exception:
- print("Received message event")
- except Exception as e:
- print(f"Caught: {e}")
- finally:
- print("Connection closed")
-except Exception as e:
- print(f"Caught: {e}")
\ No newline at end of file
diff --git a/examples/fixtures/audio.mp3 b/examples/fixtures/audio.mp3
deleted file mode 100644
index ca632f71..00000000
Binary files a/examples/fixtures/audio.mp3 and /dev/null differ
diff --git a/examples/fixtures/audio.wav b/examples/fixtures/audio.wav
deleted file mode 100644
index 498be744..00000000
Binary files a/examples/fixtures/audio.wav and /dev/null differ
diff --git a/examples/listen/v1/connect/async.py b/examples/listen/v1/connect/async.py
deleted file mode 100644
index 7fdcc1ba..00000000
--- a/examples/listen/v1/connect/async.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- async with client.listen.v1.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening task and cancel after brief demo
- # In production, you would typically await connection.start_listening() directly
- # which runs until the connection closes or is interrupted
- listen_task = asyncio.create_task(connection.start_listening())
- await asyncio.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
- listen_task.cancel()
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/listen/v1/connect/main.py b/examples/listen/v1/connect/main.py
deleted file mode 100644
index f4a016c9..00000000
--- a/examples/listen/v1/connect/main.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-client = DeepgramClient()
-
-try:
- with client.listen.v1.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/connect/with_auth_token.py b/examples/listen/v1/connect/with_auth_token.py
deleted file mode 100644
index 762498b4..00000000
--- a/examples/listen/v1/connect/with_auth_token.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- with client.listen.v1.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/connect/with_raw_response.py b/examples/listen/v1/connect/with_raw_response.py
deleted file mode 100644
index 43c5342b..00000000
--- a/examples/listen/v1/connect/with_raw_response.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-client = DeepgramClient()
-
-try:
- with client.listen.v1.with_raw_response.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/media/transcribe_file/async.py b/examples/listen/v1/media/transcribe_file/async.py
deleted file mode 100644
index 67afb75b..00000000
--- a/examples/listen/v1/media/transcribe_file/async.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import asyncio
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- # Path to audio file from fixtures
- script_dir = os.path.dirname(os.path.abspath(__file__))
- audio_path = os.path.join(script_dir, "..", "..", "..", "..", "fixtures", "audio.wav")
-
- with open(audio_path, "rb") as audio_file:
- audio_data = audio_file.read()
-
- print("Request sent")
- response = await client.listen.v1.media.transcribe_file(
- request=audio_data,
- model="nova-3",
- )
- print("Response received")
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/listen/v1/media/transcribe_file/main.py b/examples/listen/v1/media/transcribe_file/main.py
deleted file mode 100644
index da75541d..00000000
--- a/examples/listen/v1/media/transcribe_file/main.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- # Path to audio file from fixtures
- script_dir = os.path.dirname(os.path.abspath(__file__))
- audio_path = os.path.join(script_dir, "..", "..", "..", "..", "fixtures", "audio.wav")
-
- with open(audio_path, "rb") as audio_file:
- audio_data = audio_file.read()
-
- print("Request sent")
- response = client.listen.v1.media.transcribe_file(
- request=audio_data,
- model="nova-3",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/media/transcribe_file/with_auth_token.py b/examples/listen/v1/media/transcribe_file/with_auth_token.py
deleted file mode 100644
index c619b544..00000000
--- a/examples/listen/v1/media/transcribe_file/with_auth_token.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- # Path to audio file from fixtures
- script_dir = os.path.dirname(os.path.abspath(__file__))
- audio_path = os.path.join(script_dir, "..", "..", "..", "..", "fixtures", "audio.wav")
-
- with open(audio_path, "rb") as audio_file:
- audio_data = audio_file.read()
-
- print("Request sent")
- response = client.listen.v1.media.transcribe_file(
- request=audio_data,
- model="nova-3",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/media/transcribe_file/with_raw_response.py b/examples/listen/v1/media/transcribe_file/with_raw_response.py
deleted file mode 100644
index 4eec3ce5..00000000
--- a/examples/listen/v1/media/transcribe_file/with_raw_response.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- # Path to audio file from fixtures
- script_dir = os.path.dirname(os.path.abspath(__file__))
- audio_path = os.path.join(script_dir, "..", "..", "..", "..", "fixtures", "audio.wav")
-
- with open(audio_path, "rb") as audio_file:
- audio_data = audio_file.read()
-
- print("Request sent")
- response = client.listen.v1.media.with_raw_response.transcribe_file(
- request=audio_data,
- model="nova-3",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/media/transcribe_url/async.py b/examples/listen/v1/media/transcribe_url/async.py
deleted file mode 100644
index c86a996e..00000000
--- a/examples/listen/v1/media/transcribe_url/async.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- print("Request sent")
- response = await client.listen.v1.media.transcribe_url(
- model="nova-3",
- url="https://dpgr.am/spacewalk.wav",
- )
- print("Response received")
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/listen/v1/media/transcribe_url/main.py b/examples/listen/v1/media/transcribe_url/main.py
deleted file mode 100644
index 654c148f..00000000
--- a/examples/listen/v1/media/transcribe_url/main.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- print("Request sent")
- response = client.listen.v1.media.transcribe_url(
- model="nova-3",
- url="https://dpgr.am/spacewalk.wav",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/media/transcribe_url/with_auth_token.py b/examples/listen/v1/media/transcribe_url/with_auth_token.py
deleted file mode 100644
index a90556d3..00000000
--- a/examples/listen/v1/media/transcribe_url/with_auth_token.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- print("Request sent")
- response = client.listen.v1.media.transcribe_url(
- model="nova-3",
- url="https://dpgr.am/spacewalk.wav",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v1/media/transcribe_url/with_raw_response.py b/examples/listen/v1/media/transcribe_url/with_raw_response.py
deleted file mode 100644
index 1ea08727..00000000
--- a/examples/listen/v1/media/transcribe_url/with_raw_response.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- print("Request sent")
- response = client.listen.v1.media.with_raw_response.transcribe_url(
- model="nova-3",
- url="https://dpgr.am/spacewalk.wav",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/listen/v2/connect/async.py b/examples/listen/v2/connect/async.py
deleted file mode 100644
index 43bafcb1..00000000
--- a/examples/listen/v2/connect/async.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV2SocketClientResponse
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- async with client.listen.v2.connect(model="flux-general-en", encoding="linear16", sample_rate="16000") as connection:
- def on_message(message: ListenV2SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening task and cancel after brief demo
- # In production, you would typically await connection.start_listening() directly
- # which runs until the connection closes or is interrupted
- listen_task = asyncio.create_task(connection.start_listening())
- await asyncio.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
- listen_task.cancel()
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
-
-
diff --git a/examples/listen/v2/connect/main.py b/examples/listen/v2/connect/main.py
deleted file mode 100644
index bbe23aa4..00000000
--- a/examples/listen/v2/connect/main.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV2SocketClientResponse
-
-client = DeepgramClient()
-
-try:
- with client.listen.v2.connect(model="flux-general-en", encoding="linear16", sample_rate="16000") as connection:
- def on_message(message: ListenV2SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
-
-
diff --git a/examples/listen/v2/connect/with_auth_token.py b/examples/listen/v2/connect/with_auth_token.py
deleted file mode 100644
index acbbdb9f..00000000
--- a/examples/listen/v2/connect/with_auth_token.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV2SocketClientResponse
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- with client.listen.v2.connect(model="flux-general-en", encoding="linear16", sample_rate="16000") as connection:
- def on_message(message: ListenV2SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
-
-
diff --git a/examples/listen/v2/connect/with_raw_response.py b/examples/listen/v2/connect/with_raw_response.py
deleted file mode 100644
index ace1dd8d..00000000
--- a/examples/listen/v2/connect/with_raw_response.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import threading
-import time
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV2SocketClientResponse
-
-client = DeepgramClient()
-
-try:
- with client.listen.v2.with_raw_response.connect(model="flux-general-en", encoding="linear16", sample_rate="16000") as connection:
- def on_message(message: ListenV2SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
-
-
diff --git a/examples/read/v1/text/analyze/async.py b/examples/read/v1/text/analyze/async.py
deleted file mode 100644
index d929c051..00000000
--- a/examples/read/v1/text/analyze/async.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- print("Request sent")
- response = await client.read.v1.text.analyze(
- request={"text": "Hello, world!"},
- language="en",
- sentiment=True,
- summarize=True,
- topics=True,
- intents=True,
- )
- print("Response received")
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/read/v1/text/analyze/main.py b/examples/read/v1/text/analyze/main.py
deleted file mode 100644
index e2b5345f..00000000
--- a/examples/read/v1/text/analyze/main.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- print("Request sent")
- response = client.read.v1.text.analyze(
- request={"text": "Hello, world!"},
- language="en",
- sentiment=True,
- summarize=True,
- topics=True,
- intents=True,
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/read/v1/text/analyze/with_auth_token.py b/examples/read/v1/text/analyze/with_auth_token.py
deleted file mode 100644
index b6bc20a7..00000000
--- a/examples/read/v1/text/analyze/with_auth_token.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- print("Request sent")
- response = client.read.v1.text.analyze(
- request={"text": "Hello, world!"},
- language="en",
- sentiment=True,
- summarize=True,
- topics=True,
- intents=True,
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/read/v1/text/analyze/with_raw_response.py b/examples/read/v1/text/analyze/with_raw_response.py
deleted file mode 100644
index e30b81e5..00000000
--- a/examples/read/v1/text/analyze/with_raw_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- print("Request sent")
- response = client.read.v1.text.with_raw_response.analyze(
- request={"text": "Hello, world!"},
- language="en",
- sentiment=True,
- summarize=True,
- topics=True,
- intents=True,
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/requirements.txt b/examples/requirements.txt
deleted file mode 100644
index ce531c38..00000000
--- a/examples/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-json5
-python-dotenv>=1.0.0
diff --git a/examples/speak/v1/audio/generate/async.py b/examples/speak/v1/audio/generate/async.py
deleted file mode 100644
index bbf638a7..00000000
--- a/examples/speak/v1/audio/generate/async.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import asyncio
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- print("Request sent")
- response = client.speak.v1.audio.generate(
- text="Hello, this is a sample text to speech conversion.",
- )
- print("Response received")
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/speak/v1/audio/generate/main.py b/examples/speak/v1/audio/generate/main.py
deleted file mode 100644
index 2c6aabc5..00000000
--- a/examples/speak/v1/audio/generate/main.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- print("Request sent")
- response = client.speak.v1.audio.generate(
- text="Hello, this is a sample text to speech conversion.",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/speak/v1/audio/generate/with_auth_token.py b/examples/speak/v1/audio/generate/with_auth_token.py
deleted file mode 100644
index bcd896a9..00000000
--- a/examples/speak/v1/audio/generate/with_auth_token.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- print("Request sent")
- response = client.speak.v1.audio.generate(
- text="Hello, this is a sample text to speech conversion.",
- )
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/speak/v1/audio/generate/with_raw_response.py b/examples/speak/v1/audio/generate/with_raw_response.py
deleted file mode 100644
index 9de6f440..00000000
--- a/examples/speak/v1/audio/generate/with_raw_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import DeepgramClient
-
-client = DeepgramClient()
-
-try:
- print("Request sent")
- with client.speak.v1.audio.with_raw_response.generate(
- text="Hello, this is a sample text to speech conversion.",
- ) as response:
- print("Response received")
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/speak/v1/connect/async.py b/examples/speak/v1/connect/async.py
deleted file mode 100644
index 4439308f..00000000
--- a/examples/speak/v1/connect/async.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import asyncio
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-client = AsyncDeepgramClient()
-
-async def main() -> None:
- try:
- async with client.speak.v1.connect(model="aura-2-asteria-en", encoding="linear16", sample_rate=24000) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening task and cancel after brief demo
- # In production, you would typically await connection.start_listening() directly
- # which runs until the connection closes or is interrupted
- listen_task = asyncio.create_task(connection.start_listening())
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- print("Send Flush message")
- await connection.send_control(SpeakV1ControlMessage(type="Flush"))
- print("Send Close message")
- await connection.send_control(SpeakV1ControlMessage(type="Close"))
-
- await asyncio.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
- listen_task.cancel()
- except Exception as e:
- print(f"Caught: {e}")
-
-asyncio.run(main())
diff --git a/examples/speak/v1/connect/main.py b/examples/speak/v1/connect/main.py
deleted file mode 100644
index 6e7710c8..00000000
--- a/examples/speak/v1/connect/main.py
+++ /dev/null
@@ -1,43 +0,0 @@
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import threading
-import time
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-client = DeepgramClient()
-
-try:
- with client.speak.v1.connect(model="aura-2-asteria-en", encoding="linear16", sample_rate=24000) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- print("Send Flush message")
- connection.send_control(SpeakV1ControlMessage(type="Flush"))
- print("Send Close message")
- connection.send_control(SpeakV1ControlMessage(type="Close"))
-
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/speak/v1/connect/with_auth_token.py b/examples/speak/v1/connect/with_auth_token.py
deleted file mode 100644
index d39d134c..00000000
--- a/examples/speak/v1/connect/with_auth_token.py
+++ /dev/null
@@ -1,50 +0,0 @@
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import threading
-import time
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-try:
- # Using access token instead of API key
- authClient = DeepgramClient()
-
- print("Request sent")
- authResponse = authClient.auth.v1.tokens.grant()
- print("Response received")
-
- client = DeepgramClient(access_token=authResponse.access_token)
-
- with client.speak.v1.connect(model="aura-2-asteria-en", encoding="linear16", sample_rate=24000) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- print("Send Flush message")
- connection.send_control(SpeakV1ControlMessage(type="Flush"))
- print("Send Close message")
- connection.send_control(SpeakV1ControlMessage(type="Close"))
-
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/examples/speak/v1/connect/with_raw_response.py b/examples/speak/v1/connect/with_raw_response.py
deleted file mode 100644
index 838eaaa0..00000000
--- a/examples/speak/v1/connect/with_raw_response.py
+++ /dev/null
@@ -1,43 +0,0 @@
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import threading
-import time
-
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-client = DeepgramClient()
-
-try:
- with client.speak.v1.with_raw_response.connect(model="aura-2-asteria-en", encoding="linear16", sample_rate=24000) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # EXAMPLE ONLY: Start listening in a background thread for demo purposes
- # In production, you would typically call connection.start_listening() directly
- # which blocks until the connection closes, or integrate into your async event loop
- threading.Thread(target=connection.start_listening, daemon=True).start()
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- print("Send Flush message")
- connection.send_control(SpeakV1ControlMessage(type="Flush"))
- print("Send Close message")
- connection.send_control(SpeakV1ControlMessage(type="Close"))
-
- time.sleep(3) # EXAMPLE ONLY: Wait briefly to see some events before exiting
-except Exception as e:
- print(f"Caught: {e}")
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 3806ff37..00000000
--- a/mypy.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[mypy]
-plugins = pydantic.mypy
-exclude = examples/.*
diff --git a/poetry.lock b/poetry.lock
index 7b42b5ff..88167ca9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -38,13 +38,13 @@ trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
-version = "2025.10.5"
+version = "2025.11.12"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
files = [
- {file = "certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de"},
- {file = "certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43"},
+ {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"},
+ {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index 31a5535b..74873a64 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,9 @@
[project]
name = "deepgram-sdk"
-dynamic = ["version"]
[tool.poetry]
name = "deepgram-sdk"
-version = "5.3.0"
+version = "5.3.1"
description = ""
readme = "README.md"
authors = []
@@ -26,11 +25,13 @@ classifiers = [
"Operating System :: Microsoft :: Windows",
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed",
- "License :: OSI Approved :: MIT License",
+ "License :: OSI Approved :: MIT License"
+]
+packages = [
+ { include = "deepgram", from = "src"}
]
-packages = [{ include = "deepgram", from = "src" }]
-[project.urls]
+[tool.poetry.urls]
Repository = 'https://github.com/deepgram/deepgram-python-sdk'
[tool.poetry.dependencies]
@@ -50,7 +51,7 @@ types-python-dateutil = "^2.9.0.20240316"
ruff = "==0.11.5"
[tool.pytest.ini_options]
-testpaths = ["tests"]
+testpaths = [ "tests" ]
asyncio_mode = "auto"
[tool.mypy]
@@ -61,20 +62,20 @@ line-length = 120
[tool.ruff.lint]
select = [
- "E", # pycodestyle errors
- "F", # pyflakes
- "I", # isort
+ "E", # pycodestyle errors
+ "F", # pyflakes
+ "I", # isort
]
ignore = [
- "E402", # Module level import not at top of file
- "E501", # Line too long
- "E711", # Comparison to `None` should be `cond is not None`
- "E712", # Avoid equality comparisons to `True`; use `if ...:` checks
- "E721", # Use `is` and `is not` for type comparisons, or `isinstance()` for insinstance checks
- "E722", # Do not use bare `except`
- "E731", # Do not assign a `lambda` expression, use a `def`
- "F821", # Undefined name
- "F841", # Local variable ... is assigned to but never used
+ "E402", # Module level import not at top of file
+ "E501", # Line too long
+ "E711", # Comparison to `None` should be `cond is not None`
+ "E712", # Avoid equality comparisons to `True`; use `if ...:` checks
+ "E721", # Use `is` and `is not` for type comparisons, or `isinstance()` for insinstance checks
+ "E722", # Do not use bare `except`
+ "E731", # Do not assign a `lambda` expression, use a `def`
+ "F821", # Undefined name
+ "F841" # Local variable ... is assigned to but never used
]
[tool.ruff.lint.isort]
diff --git a/reference.md b/reference.md
index 54899f1e..8e7fcede 100644
--- a/reference.md
+++ b/reference.md
@@ -163,6 +163,41 @@ client = DeepgramClient(
api_key="YOUR_API_KEY",
)
client.listen.v1.media.transcribe_url(
+ callback="callback",
+ callback_method="POST",
+ extra="extra",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding="linear16",
+ filler_words=True,
+ keywords="keywords",
+ language="language",
+ measurements=True,
+ model="nova-3",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact="redact",
+ replace="replace",
+ search="search",
+ smart_format=True,
+ utterances=True,
+ utt_split=1.1,
+ version="latest",
+ mip_opt_out=True,
url="https://dpgr.am/spacewalk.wav",
)
@@ -877,7 +912,9 @@ from deepgram import DeepgramClient
client = DeepgramClient(
api_key="YOUR_API_KEY",
)
-client.manage.v1.models.list()
+client.manage.v1.models.list(
+ include_outdated=True,
+)
```
@@ -1078,6 +1115,8 @@ client = DeepgramClient(
)
client.manage.v1.projects.get(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
+ page=1.1,
)
```
@@ -1383,6 +1422,7 @@ client = DeepgramClient(
)
client.manage.v1.projects.keys.list(
project_id="123456-7890-1234-5678-901234",
+ status="active",
)
```
@@ -1849,6 +1889,7 @@ client = DeepgramClient(
)
client.manage.v1.projects.models.list(
project_id="123456-7890-1234-5678-901234",
+ include_outdated=True,
)
```
@@ -2000,6 +2041,8 @@ Generates a list of requests for a specific project
```python
+import datetime
+
from deepgram import DeepgramClient
client = DeepgramClient(
@@ -2007,8 +2050,20 @@ client = DeepgramClient(
)
client.manage.v1.projects.requests.list(
project_id="123456-7890-1234-5678-901234",
+ start=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ end=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ limit=1.1,
+ page=1.1,
accessor="12345678-1234-1234-1234-123456789012",
request_id="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
+ endpoint="listen",
+ method="sync",
+ status="succeeded",
)
```
@@ -2239,10 +2294,50 @@ client = DeepgramClient(
)
client.manage.v1.projects.usage.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
```
@@ -2816,7 +2911,10 @@ client = DeepgramClient(
)
client.manage.v1.projects.billing.breakdown.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
tag="tag1",
line_item="streaming::nova-3",
)
@@ -2951,6 +3049,8 @@ client = DeepgramClient(
)
client.manage.v1.projects.billing.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
```
@@ -3038,6 +3138,7 @@ client = DeepgramClient(
)
client.manage.v1.projects.billing.purchases.list(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
)
```
@@ -3523,10 +3624,51 @@ client = DeepgramClient(
)
client.manage.v1.projects.usage.breakdown.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
+ grouping="accessor",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
```
@@ -3958,6 +4100,8 @@ client = DeepgramClient(
)
client.manage.v1.projects.usage.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
```
@@ -4044,6 +4188,18 @@ client = DeepgramClient(
api_key="YOUR_API_KEY",
)
client.read.v1.text.analyze(
+ callback="callback",
+ callback_method="POST",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ language="language",
request={"url": "url"},
)
diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh
deleted file mode 100755
index 329a9504..00000000
--- a/scripts/run_examples.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/bin/bash
-
-# Check for DEEPGRAM_API_KEY in environment or .env file
-if [ -z "$DEEPGRAM_API_KEY" ] && [ ! -f .env ] || ([ -f .env ] && ! grep -q "DEEPGRAM_API_KEY" .env); then
- echo "β DEEPGRAM_API_KEY not found in environment variables or .env file"
- echo "Please set up your Deepgram API key before running examples"
- echo "You can:"
- echo " 1. Export it: export DEEPGRAM_API_KEY=your_key_here"
- echo " 2. Add it to a .env file: echo 'DEEPGRAM_API_KEY=your_key_here' > .env"
- exit 1
-fi
-
-echo "β
DEEPGRAM_API_KEY found, proceeding with examples..."
-echo ""
-
-
-echo "β¨β¨β¨β¨ Running speak/v1/audio/generate/ examples β¨β¨β¨β¨"
-
-echo "Running speak/v1/audio/generate/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/audio/generate/main.py
-echo "Running speak/v1/audio/generate/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/audio/generate/async.py
-echo "Running speak/v1/audio/generate/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/audio/generate/with_raw_response.py
-echo "Running speak/v1/audio/generate/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/audio/generate/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running speak/v1/connect/ examples β¨β¨β¨β¨"
-
-echo "Running speak/v1/connect/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/connect/main.py
-echo "Running speak/v1/connect/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/connect/async.py
-echo "Running speak/v1/connect/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/connect/with_raw_response.py
-echo "Running speak/v1/connect/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/speak/v1/connect/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running read/v1/text/analyze/ examples β¨β¨β¨β¨"
-
-echo "Running read/v1/text/analyze/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/read/v1/text/analyze/main.py
-echo "Running read/v1/text/analyze/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/read/v1/text/analyze/async.py
-echo "Running read/v1/text/analyze/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/read/v1/text/analyze/with_raw_response.py
-echo "Running read/v1/text/analyze/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/read/v1/text/analyze/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running listen/v1/connect/ examples β¨β¨β¨β¨"
-
-echo "Running listen/v1/connect/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/connect/main.py
-echo "Running listen/v1/connect/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/connect/async.py
-echo "Running listen/v1/connect/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/connect/with_raw_response.py
-echo "Running listen/v1/connect/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/connect/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running listen/v1/media/transcribe_file/ examples β¨β¨β¨β¨"
-
-echo "Running listen/v1/media/transcribe_file/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_file/main.py
-echo "Running listen/v1/media/transcribe_file/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_file/async.py
-echo "Running listen/v1/media/transcribe_file/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_file/with_raw_response.py
-echo "Running listen/v1/media/transcribe_file/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_file/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running listen/v1/media/transcribe_url/ examples β¨β¨β¨β¨"
-
-echo "Running listen/v1/media/transcribe_url/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_url/main.py
-echo "Running listen/v1/media/transcribe_url/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_url/async.py
-echo "Running listen/v1/media/transcribe_url/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_url/with_raw_response.py
-echo "Running listen/v1/media/transcribe_url/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v1/media/transcribe_url/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running listen/v2/connect/ examples β¨β¨β¨β¨"
-
-echo "Running listen/v2/connect/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v2/connect/main.py
-echo "Running listen/v2/connect/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v2/connect/async.py
-echo "Running listen/v2/connect/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v2/connect/with_raw_response.py
-echo "Running listen/v2/connect/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/listen/v2/connect/with_auth_token.py
-
-echo "β¨β¨β¨β¨ Running agent/v1/connect/ examples β¨β¨β¨β¨"
-
-echo "Running agent/v1/connect/main.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/agent/v1/connect/main.py
-echo "Running agent/v1/connect/async.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/agent/v1/connect/async.py
-echo "Running agent/v1/connect/with_raw_response.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/agent/v1/connect/with_raw_response.py
-echo "Running agent/v1/connect/with_auth_token.py"
-DEEPGRAM_DEBUG=1 poetry run python examples/agent/v1/connect/with_auth_token.py
\ No newline at end of file
diff --git a/src/deepgram/agent/__init__.py b/src/deepgram/agent/__init__.py
index 148ad154..40acb778 100644
--- a/src/deepgram/agent/__init__.py
+++ b/src/deepgram/agent/__init__.py
@@ -7,7 +7,445 @@
if typing.TYPE_CHECKING:
from . import v1
-_dynamic_imports: typing.Dict[str, str] = {"v1": ".v1"}
+ from .v1 import (
+ AgentV1AgentAudioDone,
+ AgentV1AgentAudioDoneParams,
+ AgentV1AgentStartedSpeaking,
+ AgentV1AgentStartedSpeakingParams,
+ AgentV1AgentThinking,
+ AgentV1AgentThinkingParams,
+ AgentV1ConversationText,
+ AgentV1ConversationTextParams,
+ AgentV1ConversationTextRole,
+ AgentV1Error,
+ AgentV1ErrorParams,
+ AgentV1FunctionCallRequest,
+ AgentV1FunctionCallRequestFunctionsItem,
+ AgentV1FunctionCallRequestFunctionsItemParams,
+ AgentV1FunctionCallRequestParams,
+ AgentV1InjectAgentMessage,
+ AgentV1InjectAgentMessageParams,
+ AgentV1InjectUserMessage,
+ AgentV1InjectUserMessageParams,
+ AgentV1InjectionRefused,
+ AgentV1InjectionRefusedParams,
+ AgentV1KeepAlive,
+ AgentV1KeepAliveParams,
+ AgentV1PromptUpdated,
+ AgentV1PromptUpdatedParams,
+ AgentV1ReceiveFunctionCallResponse,
+ AgentV1ReceiveFunctionCallResponseParams,
+ AgentV1SendFunctionCallResponse,
+ AgentV1SendFunctionCallResponseParams,
+ AgentV1Settings,
+ AgentV1SettingsAgent,
+ AgentV1SettingsAgentContext,
+ AgentV1SettingsAgentContextMessagesItem,
+ AgentV1SettingsAgentContextMessagesItemContent,
+ AgentV1SettingsAgentContextMessagesItemContentParams,
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+ AgentV1SettingsAgentContextMessagesItemParams,
+ AgentV1SettingsAgentContextParams,
+ AgentV1SettingsAgentListen,
+ AgentV1SettingsAgentListenParams,
+ AgentV1SettingsAgentListenProvider,
+ AgentV1SettingsAgentListenProviderParams,
+ AgentV1SettingsAgentParams,
+ AgentV1SettingsAgentSpeak,
+ AgentV1SettingsAgentSpeakEndpoint,
+ AgentV1SettingsAgentSpeakEndpointEndpoint,
+ AgentV1SettingsAgentSpeakEndpointEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointProvider,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesia,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgram,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAi,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakItem,
+ AgentV1SettingsAgentSpeakItemEndpoint,
+ AgentV1SettingsAgentSpeakItemEndpointParams,
+ AgentV1SettingsAgentSpeakItemParams,
+ AgentV1SettingsAgentSpeakItemProvider,
+ AgentV1SettingsAgentSpeakItemProviderAwsPolly,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakItemProviderCartesia,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaParams,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakItemProviderDeepgram,
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakItemProviderDeepgramParams,
+ AgentV1SettingsAgentSpeakItemProviderElevenLabs,
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsParams,
+ AgentV1SettingsAgentSpeakItemProviderOpenAi,
+ AgentV1SettingsAgentSpeakItemProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakItemProviderOpenAiParams,
+ AgentV1SettingsAgentSpeakItemProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakItemProviderParams,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAi,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakParams,
+ AgentV1SettingsAgentThink,
+ AgentV1SettingsAgentThinkContextLength,
+ AgentV1SettingsAgentThinkContextLengthParams,
+ AgentV1SettingsAgentThinkEndpoint,
+ AgentV1SettingsAgentThinkEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItem,
+ AgentV1SettingsAgentThinkFunctionsItemEndpoint,
+ AgentV1SettingsAgentThinkFunctionsItemEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItemParams,
+ AgentV1SettingsAgentThinkParams,
+ AgentV1SettingsAgentThinkProvider,
+ AgentV1SettingsAgentThinkProviderCredentials,
+ AgentV1SettingsAgentThinkProviderCredentialsCredentials,
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams,
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsType,
+ AgentV1SettingsAgentThinkProviderCredentialsModel,
+ AgentV1SettingsAgentThinkProviderCredentialsParams,
+ AgentV1SettingsAgentThinkProviderModel,
+ AgentV1SettingsAgentThinkProviderModelParams,
+ AgentV1SettingsAgentThinkProviderParams,
+ AgentV1SettingsAgentThinkProviderThree,
+ AgentV1SettingsAgentThinkProviderThreeModel,
+ AgentV1SettingsAgentThinkProviderThreeParams,
+ AgentV1SettingsAgentThinkProviderTwo,
+ AgentV1SettingsAgentThinkProviderTwoModel,
+ AgentV1SettingsAgentThinkProviderTwoParams,
+ AgentV1SettingsAgentThinkProviderZero,
+ AgentV1SettingsAgentThinkProviderZeroModel,
+ AgentV1SettingsAgentThinkProviderZeroParams,
+ AgentV1SettingsApplied,
+ AgentV1SettingsAppliedParams,
+ AgentV1SettingsAudio,
+ AgentV1SettingsAudioInput,
+ AgentV1SettingsAudioInputEncoding,
+ AgentV1SettingsAudioInputParams,
+ AgentV1SettingsAudioOutput,
+ AgentV1SettingsAudioOutputEncoding,
+ AgentV1SettingsAudioOutputParams,
+ AgentV1SettingsAudioParams,
+ AgentV1SettingsFlags,
+ AgentV1SettingsFlagsParams,
+ AgentV1SettingsParams,
+ AgentV1SpeakUpdated,
+ AgentV1SpeakUpdatedParams,
+ AgentV1UpdatePrompt,
+ AgentV1UpdatePromptParams,
+ AgentV1UpdateSpeak,
+ AgentV1UpdateSpeakParams,
+ AgentV1UpdateSpeakSpeak,
+ AgentV1UpdateSpeakSpeakEndpoint,
+ AgentV1UpdateSpeakSpeakEndpointParams,
+ AgentV1UpdateSpeakSpeakParams,
+ AgentV1UpdateSpeakSpeakProvider,
+ AgentV1UpdateSpeakSpeakProviderAwsPolly,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyEngine,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyVoice,
+ AgentV1UpdateSpeakSpeakProviderCartesia,
+ AgentV1UpdateSpeakSpeakProviderCartesiaModelId,
+ AgentV1UpdateSpeakSpeakProviderCartesiaParams,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoice,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams,
+ AgentV1UpdateSpeakSpeakProviderDeepgram,
+ AgentV1UpdateSpeakSpeakProviderDeepgramModel,
+ AgentV1UpdateSpeakSpeakProviderDeepgramParams,
+ AgentV1UpdateSpeakSpeakProviderElevenLabs,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProviderOpenAi,
+ AgentV1UpdateSpeakSpeakProviderOpenAiModel,
+ AgentV1UpdateSpeakSpeakProviderOpenAiParams,
+ AgentV1UpdateSpeakSpeakProviderOpenAiVoice,
+ AgentV1UpdateSpeakSpeakProviderParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ AgentV1UserStartedSpeaking,
+ AgentV1UserStartedSpeakingParams,
+ AgentV1Warning,
+ AgentV1WarningParams,
+ AgentV1Welcome,
+ AgentV1WelcomeParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDone": ".v1",
+ "AgentV1AgentAudioDoneParams": ".v1",
+ "AgentV1AgentStartedSpeaking": ".v1",
+ "AgentV1AgentStartedSpeakingParams": ".v1",
+ "AgentV1AgentThinking": ".v1",
+ "AgentV1AgentThinkingParams": ".v1",
+ "AgentV1ConversationText": ".v1",
+ "AgentV1ConversationTextParams": ".v1",
+ "AgentV1ConversationTextRole": ".v1",
+ "AgentV1Error": ".v1",
+ "AgentV1ErrorParams": ".v1",
+ "AgentV1FunctionCallRequest": ".v1",
+ "AgentV1FunctionCallRequestFunctionsItem": ".v1",
+ "AgentV1FunctionCallRequestFunctionsItemParams": ".v1",
+ "AgentV1FunctionCallRequestParams": ".v1",
+ "AgentV1InjectAgentMessage": ".v1",
+ "AgentV1InjectAgentMessageParams": ".v1",
+ "AgentV1InjectUserMessage": ".v1",
+ "AgentV1InjectUserMessageParams": ".v1",
+ "AgentV1InjectionRefused": ".v1",
+ "AgentV1InjectionRefusedParams": ".v1",
+ "AgentV1KeepAlive": ".v1",
+ "AgentV1KeepAliveParams": ".v1",
+ "AgentV1PromptUpdated": ".v1",
+ "AgentV1PromptUpdatedParams": ".v1",
+ "AgentV1ReceiveFunctionCallResponse": ".v1",
+ "AgentV1ReceiveFunctionCallResponseParams": ".v1",
+ "AgentV1SendFunctionCallResponse": ".v1",
+ "AgentV1SendFunctionCallResponseParams": ".v1",
+ "AgentV1Settings": ".v1",
+ "AgentV1SettingsAgent": ".v1",
+ "AgentV1SettingsAgentContext": ".v1",
+ "AgentV1SettingsAgentContextMessagesItem": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemContent": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemContentParams": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemContentRole": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemParams": ".v1",
+ "AgentV1SettingsAgentContextParams": ".v1",
+ "AgentV1SettingsAgentListen": ".v1",
+ "AgentV1SettingsAgentListenParams": ".v1",
+ "AgentV1SettingsAgentListenProvider": ".v1",
+ "AgentV1SettingsAgentListenProviderParams": ".v1",
+ "AgentV1SettingsAgentParams": ".v1",
+ "AgentV1SettingsAgentSpeak": ".v1",
+ "AgentV1SettingsAgentSpeakEndpoint": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesia": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgram": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabs": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAi": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".v1",
+ "AgentV1SettingsAgentSpeakItem": ".v1",
+ "AgentV1SettingsAgentSpeakItemEndpoint": ".v1",
+ "AgentV1SettingsAgentSpeakItemEndpointParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderCartesia": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaModelId": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoice": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgram": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramModel": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabs": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAi": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiModel": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiVoice": ".v1",
+ "AgentV1SettingsAgentSpeakItemProviderParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_Cartesia": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_CartesiaParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_Deepgram": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_DeepgramParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabs": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAi": ".v1",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAiParams": ".v1",
+ "AgentV1SettingsAgentSpeakParams": ".v1",
+ "AgentV1SettingsAgentThink": ".v1",
+ "AgentV1SettingsAgentThinkContextLength": ".v1",
+ "AgentV1SettingsAgentThinkContextLengthParams": ".v1",
+ "AgentV1SettingsAgentThinkEndpoint": ".v1",
+ "AgentV1SettingsAgentThinkEndpointParams": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItem": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItemParams": ".v1",
+ "AgentV1SettingsAgentThinkParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider": ".v1",
+ "AgentV1SettingsAgentThinkProviderCredentials": ".v1",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentials": ".v1",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsType": ".v1",
+ "AgentV1SettingsAgentThinkProviderCredentialsModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderCredentialsParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderModelParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderThree": ".v1",
+ "AgentV1SettingsAgentThinkProviderThreeModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderThreeParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderTwo": ".v1",
+ "AgentV1SettingsAgentThinkProviderTwoModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderTwoParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderZero": ".v1",
+ "AgentV1SettingsAgentThinkProviderZeroModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderZeroParams": ".v1",
+ "AgentV1SettingsApplied": ".v1",
+ "AgentV1SettingsAppliedParams": ".v1",
+ "AgentV1SettingsAudio": ".v1",
+ "AgentV1SettingsAudioInput": ".v1",
+ "AgentV1SettingsAudioInputEncoding": ".v1",
+ "AgentV1SettingsAudioInputParams": ".v1",
+ "AgentV1SettingsAudioOutput": ".v1",
+ "AgentV1SettingsAudioOutputEncoding": ".v1",
+ "AgentV1SettingsAudioOutputParams": ".v1",
+ "AgentV1SettingsAudioParams": ".v1",
+ "AgentV1SettingsFlags": ".v1",
+ "AgentV1SettingsFlagsParams": ".v1",
+ "AgentV1SettingsParams": ".v1",
+ "AgentV1SpeakUpdated": ".v1",
+ "AgentV1SpeakUpdatedParams": ".v1",
+ "AgentV1UpdatePrompt": ".v1",
+ "AgentV1UpdatePromptParams": ".v1",
+ "AgentV1UpdateSpeak": ".v1",
+ "AgentV1UpdateSpeakParams": ".v1",
+ "AgentV1UpdateSpeakSpeak": ".v1",
+ "AgentV1UpdateSpeakSpeakEndpoint": ".v1",
+ "AgentV1UpdateSpeakSpeakEndpointParams": ".v1",
+ "AgentV1UpdateSpeakSpeakParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesia": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams": ".v1",
+ "AgentV1UserStartedSpeaking": ".v1",
+ "AgentV1UserStartedSpeakingParams": ".v1",
+ "AgentV1Warning": ".v1",
+ "AgentV1WarningParams": ".v1",
+ "AgentV1Welcome": ".v1",
+ "AgentV1WelcomeParams": ".v1",
+ "v1": ".v1",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +469,223 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["v1"]
+__all__ = [
+ "AgentV1AgentAudioDone",
+ "AgentV1AgentAudioDoneParams",
+ "AgentV1AgentStartedSpeaking",
+ "AgentV1AgentStartedSpeakingParams",
+ "AgentV1AgentThinking",
+ "AgentV1AgentThinkingParams",
+ "AgentV1ConversationText",
+ "AgentV1ConversationTextParams",
+ "AgentV1ConversationTextRole",
+ "AgentV1Error",
+ "AgentV1ErrorParams",
+ "AgentV1FunctionCallRequest",
+ "AgentV1FunctionCallRequestFunctionsItem",
+ "AgentV1FunctionCallRequestFunctionsItemParams",
+ "AgentV1FunctionCallRequestParams",
+ "AgentV1InjectAgentMessage",
+ "AgentV1InjectAgentMessageParams",
+ "AgentV1InjectUserMessage",
+ "AgentV1InjectUserMessageParams",
+ "AgentV1InjectionRefused",
+ "AgentV1InjectionRefusedParams",
+ "AgentV1KeepAlive",
+ "AgentV1KeepAliveParams",
+ "AgentV1PromptUpdated",
+ "AgentV1PromptUpdatedParams",
+ "AgentV1ReceiveFunctionCallResponse",
+ "AgentV1ReceiveFunctionCallResponseParams",
+ "AgentV1SendFunctionCallResponse",
+ "AgentV1SendFunctionCallResponseParams",
+ "AgentV1Settings",
+ "AgentV1SettingsAgent",
+ "AgentV1SettingsAgentContext",
+ "AgentV1SettingsAgentContextMessagesItem",
+ "AgentV1SettingsAgentContextMessagesItemContent",
+ "AgentV1SettingsAgentContextMessagesItemContentParams",
+ "AgentV1SettingsAgentContextMessagesItemContentRole",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams",
+ "AgentV1SettingsAgentContextMessagesItemParams",
+ "AgentV1SettingsAgentContextParams",
+ "AgentV1SettingsAgentListen",
+ "AgentV1SettingsAgentListenParams",
+ "AgentV1SettingsAgentListenProvider",
+ "AgentV1SettingsAgentListenProviderParams",
+ "AgentV1SettingsAgentParams",
+ "AgentV1SettingsAgentSpeak",
+ "AgentV1SettingsAgentSpeakEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesia",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgram",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakItem",
+ "AgentV1SettingsAgentSpeakItemEndpoint",
+ "AgentV1SettingsAgentSpeakItemEndpointParams",
+ "AgentV1SettingsAgentSpeakItemParams",
+ "AgentV1SettingsAgentSpeakItemProvider",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakItemProviderCartesia",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaParams",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgram",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramParams",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabs",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsParams",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAi",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiParams",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakItemProviderParams",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakItemProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakItemProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakItemProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakItemProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakParams",
+ "AgentV1SettingsAgentThink",
+ "AgentV1SettingsAgentThinkContextLength",
+ "AgentV1SettingsAgentThinkContextLengthParams",
+ "AgentV1SettingsAgentThinkEndpoint",
+ "AgentV1SettingsAgentThinkEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItem",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemParams",
+ "AgentV1SettingsAgentThinkParams",
+ "AgentV1SettingsAgentThinkProvider",
+ "AgentV1SettingsAgentThinkProviderCredentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsType",
+ "AgentV1SettingsAgentThinkProviderCredentialsModel",
+ "AgentV1SettingsAgentThinkProviderCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderModel",
+ "AgentV1SettingsAgentThinkProviderModelParams",
+ "AgentV1SettingsAgentThinkProviderParams",
+ "AgentV1SettingsAgentThinkProviderThree",
+ "AgentV1SettingsAgentThinkProviderThreeModel",
+ "AgentV1SettingsAgentThinkProviderThreeParams",
+ "AgentV1SettingsAgentThinkProviderTwo",
+ "AgentV1SettingsAgentThinkProviderTwoModel",
+ "AgentV1SettingsAgentThinkProviderTwoParams",
+ "AgentV1SettingsAgentThinkProviderZero",
+ "AgentV1SettingsAgentThinkProviderZeroModel",
+ "AgentV1SettingsAgentThinkProviderZeroParams",
+ "AgentV1SettingsApplied",
+ "AgentV1SettingsAppliedParams",
+ "AgentV1SettingsAudio",
+ "AgentV1SettingsAudioInput",
+ "AgentV1SettingsAudioInputEncoding",
+ "AgentV1SettingsAudioInputParams",
+ "AgentV1SettingsAudioOutput",
+ "AgentV1SettingsAudioOutputEncoding",
+ "AgentV1SettingsAudioOutputParams",
+ "AgentV1SettingsAudioParams",
+ "AgentV1SettingsFlags",
+ "AgentV1SettingsFlagsParams",
+ "AgentV1SettingsParams",
+ "AgentV1SpeakUpdated",
+ "AgentV1SpeakUpdatedParams",
+ "AgentV1UpdatePrompt",
+ "AgentV1UpdatePromptParams",
+ "AgentV1UpdateSpeak",
+ "AgentV1UpdateSpeakParams",
+ "AgentV1UpdateSpeakSpeak",
+ "AgentV1UpdateSpeakSpeakEndpoint",
+ "AgentV1UpdateSpeakSpeakEndpointParams",
+ "AgentV1UpdateSpeakSpeakParams",
+ "AgentV1UpdateSpeakSpeakProvider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice",
+ "AgentV1UpdateSpeakSpeakProviderParams",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams",
+ "AgentV1UserStartedSpeaking",
+ "AgentV1UserStartedSpeakingParams",
+ "AgentV1Warning",
+ "AgentV1WarningParams",
+ "AgentV1Welcome",
+ "AgentV1WelcomeParams",
+ "v1",
+]
diff --git a/src/deepgram/agent/v1/__init__.py b/src/deepgram/agent/v1/__init__.py
index 31fcb147..0786c47b 100644
--- a/src/deepgram/agent/v1/__init__.py
+++ b/src/deepgram/agent/v1/__init__.py
@@ -6,8 +6,448 @@
from importlib import import_module
if typing.TYPE_CHECKING:
+ from .types import (
+ AgentV1AgentAudioDone,
+ AgentV1AgentStartedSpeaking,
+ AgentV1AgentThinking,
+ AgentV1ConversationText,
+ AgentV1ConversationTextRole,
+ AgentV1Error,
+ AgentV1FunctionCallRequest,
+ AgentV1FunctionCallRequestFunctionsItem,
+ AgentV1InjectAgentMessage,
+ AgentV1InjectUserMessage,
+ AgentV1InjectionRefused,
+ AgentV1KeepAlive,
+ AgentV1PromptUpdated,
+ AgentV1ReceiveFunctionCallResponse,
+ AgentV1SendFunctionCallResponse,
+ AgentV1Settings,
+ AgentV1SettingsAgent,
+ AgentV1SettingsAgentContext,
+ AgentV1SettingsAgentContextMessagesItem,
+ AgentV1SettingsAgentContextMessagesItemContent,
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+ AgentV1SettingsAgentListen,
+ AgentV1SettingsAgentListenProvider,
+ AgentV1SettingsAgentSpeak,
+ AgentV1SettingsAgentSpeakEndpoint,
+ AgentV1SettingsAgentSpeakEndpointEndpoint,
+ AgentV1SettingsAgentSpeakEndpointProvider,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesia,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgram,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAi,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ AgentV1SettingsAgentSpeakItem,
+ AgentV1SettingsAgentSpeakItemEndpoint,
+ AgentV1SettingsAgentSpeakItemProvider,
+ AgentV1SettingsAgentSpeakItemProviderAwsPolly,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakItemProviderCartesia,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakItemProviderDeepgram,
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakItemProviderElevenLabs,
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakItemProviderOpenAi,
+ AgentV1SettingsAgentSpeakItemProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakItemProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAi,
+ AgentV1SettingsAgentThink,
+ AgentV1SettingsAgentThinkContextLength,
+ AgentV1SettingsAgentThinkEndpoint,
+ AgentV1SettingsAgentThinkFunctionsItem,
+ AgentV1SettingsAgentThinkFunctionsItemEndpoint,
+ AgentV1SettingsAgentThinkProvider,
+ AgentV1SettingsAgentThinkProviderCredentials,
+ AgentV1SettingsAgentThinkProviderCredentialsCredentials,
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsType,
+ AgentV1SettingsAgentThinkProviderCredentialsModel,
+ AgentV1SettingsAgentThinkProviderModel,
+ AgentV1SettingsAgentThinkProviderThree,
+ AgentV1SettingsAgentThinkProviderThreeModel,
+ AgentV1SettingsAgentThinkProviderTwo,
+ AgentV1SettingsAgentThinkProviderTwoModel,
+ AgentV1SettingsAgentThinkProviderZero,
+ AgentV1SettingsAgentThinkProviderZeroModel,
+ AgentV1SettingsApplied,
+ AgentV1SettingsAudio,
+ AgentV1SettingsAudioInput,
+ AgentV1SettingsAudioInputEncoding,
+ AgentV1SettingsAudioOutput,
+ AgentV1SettingsAudioOutputEncoding,
+ AgentV1SettingsFlags,
+ AgentV1SpeakUpdated,
+ AgentV1UpdatePrompt,
+ AgentV1UpdateSpeak,
+ AgentV1UpdateSpeakSpeak,
+ AgentV1UpdateSpeakSpeakEndpoint,
+ AgentV1UpdateSpeakSpeakProvider,
+ AgentV1UpdateSpeakSpeakProviderAwsPolly,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyEngine,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyVoice,
+ AgentV1UpdateSpeakSpeakProviderCartesia,
+ AgentV1UpdateSpeakSpeakProviderCartesiaModelId,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoice,
+ AgentV1UpdateSpeakSpeakProviderDeepgram,
+ AgentV1UpdateSpeakSpeakProviderDeepgramModel,
+ AgentV1UpdateSpeakSpeakProviderElevenLabs,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+ AgentV1UpdateSpeakSpeakProviderOpenAi,
+ AgentV1UpdateSpeakSpeakProviderOpenAiModel,
+ AgentV1UpdateSpeakSpeakProviderOpenAiVoice,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ AgentV1UserStartedSpeaking,
+ AgentV1Warning,
+ AgentV1Welcome,
+ )
from . import settings
-_dynamic_imports: typing.Dict[str, str] = {"settings": ".settings"}
+ from .requests import (
+ AgentV1AgentAudioDoneParams,
+ AgentV1AgentStartedSpeakingParams,
+ AgentV1AgentThinkingParams,
+ AgentV1ConversationTextParams,
+ AgentV1ErrorParams,
+ AgentV1FunctionCallRequestFunctionsItemParams,
+ AgentV1FunctionCallRequestParams,
+ AgentV1InjectAgentMessageParams,
+ AgentV1InjectUserMessageParams,
+ AgentV1InjectionRefusedParams,
+ AgentV1KeepAliveParams,
+ AgentV1PromptUpdatedParams,
+ AgentV1ReceiveFunctionCallResponseParams,
+ AgentV1SendFunctionCallResponseParams,
+ AgentV1SettingsAgentContextMessagesItemContentParams,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+ AgentV1SettingsAgentContextMessagesItemParams,
+ AgentV1SettingsAgentContextParams,
+ AgentV1SettingsAgentListenParams,
+ AgentV1SettingsAgentListenProviderParams,
+ AgentV1SettingsAgentParams,
+ AgentV1SettingsAgentSpeakEndpointEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams,
+ AgentV1SettingsAgentSpeakEndpointProviderParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakItemEndpointParams,
+ AgentV1SettingsAgentSpeakItemParams,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaParams,
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakItemProviderDeepgramParams,
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsParams,
+ AgentV1SettingsAgentSpeakItemProviderOpenAiParams,
+ AgentV1SettingsAgentSpeakItemProviderParams,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakParams,
+ AgentV1SettingsAgentThinkContextLengthParams,
+ AgentV1SettingsAgentThinkEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItemEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItemParams,
+ AgentV1SettingsAgentThinkParams,
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams,
+ AgentV1SettingsAgentThinkProviderCredentialsParams,
+ AgentV1SettingsAgentThinkProviderModelParams,
+ AgentV1SettingsAgentThinkProviderParams,
+ AgentV1SettingsAgentThinkProviderThreeParams,
+ AgentV1SettingsAgentThinkProviderTwoParams,
+ AgentV1SettingsAgentThinkProviderZeroParams,
+ AgentV1SettingsAppliedParams,
+ AgentV1SettingsAudioInputParams,
+ AgentV1SettingsAudioOutputParams,
+ AgentV1SettingsAudioParams,
+ AgentV1SettingsFlagsParams,
+ AgentV1SettingsParams,
+ AgentV1SpeakUpdatedParams,
+ AgentV1UpdatePromptParams,
+ AgentV1UpdateSpeakParams,
+ AgentV1UpdateSpeakSpeakEndpointParams,
+ AgentV1UpdateSpeakSpeakParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyParams,
+ AgentV1UpdateSpeakSpeakProviderCartesiaParams,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams,
+ AgentV1UpdateSpeakSpeakProviderDeepgramParams,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProviderOpenAiParams,
+ AgentV1UpdateSpeakSpeakProviderParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ AgentV1UserStartedSpeakingParams,
+ AgentV1WarningParams,
+ AgentV1WelcomeParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDone": ".types",
+ "AgentV1AgentAudioDoneParams": ".requests",
+ "AgentV1AgentStartedSpeaking": ".types",
+ "AgentV1AgentStartedSpeakingParams": ".requests",
+ "AgentV1AgentThinking": ".types",
+ "AgentV1AgentThinkingParams": ".requests",
+ "AgentV1ConversationText": ".types",
+ "AgentV1ConversationTextParams": ".requests",
+ "AgentV1ConversationTextRole": ".types",
+ "AgentV1Error": ".types",
+ "AgentV1ErrorParams": ".requests",
+ "AgentV1FunctionCallRequest": ".types",
+ "AgentV1FunctionCallRequestFunctionsItem": ".types",
+ "AgentV1FunctionCallRequestFunctionsItemParams": ".requests",
+ "AgentV1FunctionCallRequestParams": ".requests",
+ "AgentV1InjectAgentMessage": ".types",
+ "AgentV1InjectAgentMessageParams": ".requests",
+ "AgentV1InjectUserMessage": ".types",
+ "AgentV1InjectUserMessageParams": ".requests",
+ "AgentV1InjectionRefused": ".types",
+ "AgentV1InjectionRefusedParams": ".requests",
+ "AgentV1KeepAlive": ".types",
+ "AgentV1KeepAliveParams": ".requests",
+ "AgentV1PromptUpdated": ".types",
+ "AgentV1PromptUpdatedParams": ".requests",
+ "AgentV1ReceiveFunctionCallResponse": ".types",
+ "AgentV1ReceiveFunctionCallResponseParams": ".requests",
+ "AgentV1SendFunctionCallResponse": ".types",
+ "AgentV1SendFunctionCallResponseParams": ".requests",
+ "AgentV1Settings": ".types",
+ "AgentV1SettingsAgent": ".types",
+ "AgentV1SettingsAgentContext": ".types",
+ "AgentV1SettingsAgentContextMessagesItem": ".types",
+ "AgentV1SettingsAgentContextMessagesItemContent": ".types",
+ "AgentV1SettingsAgentContextMessagesItemContentParams": ".requests",
+ "AgentV1SettingsAgentContextMessagesItemContentRole": ".types",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".types",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".types",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".requests",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".requests",
+ "AgentV1SettingsAgentContextMessagesItemParams": ".requests",
+ "AgentV1SettingsAgentContextParams": ".requests",
+ "AgentV1SettingsAgentListen": ".types",
+ "AgentV1SettingsAgentListenParams": ".requests",
+ "AgentV1SettingsAgentListenProvider": ".types",
+ "AgentV1SettingsAgentListenProviderParams": ".requests",
+ "AgentV1SettingsAgentParams": ".requests",
+ "AgentV1SettingsAgentSpeak": ".types",
+ "AgentV1SettingsAgentSpeakEndpoint": ".types",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint": ".types",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesia": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgram": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabs": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAi": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".requests",
+ "AgentV1SettingsAgentSpeakItem": ".types",
+ "AgentV1SettingsAgentSpeakItemEndpoint": ".types",
+ "AgentV1SettingsAgentSpeakItemEndpointParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProvider": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderCartesia": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaModelId": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoice": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgram": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramModel": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabs": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAi": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiModel": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiVoice": ".types",
+ "AgentV1SettingsAgentSpeakItemProviderParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProvider_Cartesia": ".types",
+ "AgentV1SettingsAgentSpeakItemProvider_CartesiaParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProvider_Deepgram": ".types",
+ "AgentV1SettingsAgentSpeakItemProvider_DeepgramParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabs": ".types",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams": ".requests",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAi": ".types",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAiParams": ".requests",
+ "AgentV1SettingsAgentSpeakParams": ".requests",
+ "AgentV1SettingsAgentThink": ".types",
+ "AgentV1SettingsAgentThinkContextLength": ".types",
+ "AgentV1SettingsAgentThinkContextLengthParams": ".requests",
+ "AgentV1SettingsAgentThinkEndpoint": ".types",
+ "AgentV1SettingsAgentThinkEndpointParams": ".requests",
+ "AgentV1SettingsAgentThinkFunctionsItem": ".types",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint": ".types",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams": ".requests",
+ "AgentV1SettingsAgentThinkFunctionsItemParams": ".requests",
+ "AgentV1SettingsAgentThinkParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider": ".types",
+ "AgentV1SettingsAgentThinkProviderCredentials": ".types",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentials": ".types",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsType": ".types",
+ "AgentV1SettingsAgentThinkProviderCredentialsModel": ".types",
+ "AgentV1SettingsAgentThinkProviderCredentialsParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderModel": ".types",
+ "AgentV1SettingsAgentThinkProviderModelParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderThree": ".types",
+ "AgentV1SettingsAgentThinkProviderThreeModel": ".types",
+ "AgentV1SettingsAgentThinkProviderThreeParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderTwo": ".types",
+ "AgentV1SettingsAgentThinkProviderTwoModel": ".types",
+ "AgentV1SettingsAgentThinkProviderTwoParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderZero": ".types",
+ "AgentV1SettingsAgentThinkProviderZeroModel": ".types",
+ "AgentV1SettingsAgentThinkProviderZeroParams": ".requests",
+ "AgentV1SettingsApplied": ".types",
+ "AgentV1SettingsAppliedParams": ".requests",
+ "AgentV1SettingsAudio": ".types",
+ "AgentV1SettingsAudioInput": ".types",
+ "AgentV1SettingsAudioInputEncoding": ".types",
+ "AgentV1SettingsAudioInputParams": ".requests",
+ "AgentV1SettingsAudioOutput": ".types",
+ "AgentV1SettingsAudioOutputEncoding": ".types",
+ "AgentV1SettingsAudioOutputParams": ".requests",
+ "AgentV1SettingsAudioParams": ".requests",
+ "AgentV1SettingsFlags": ".types",
+ "AgentV1SettingsFlagsParams": ".requests",
+ "AgentV1SettingsParams": ".requests",
+ "AgentV1SpeakUpdated": ".types",
+ "AgentV1SpeakUpdatedParams": ".requests",
+ "AgentV1UpdatePrompt": ".types",
+ "AgentV1UpdatePromptParams": ".requests",
+ "AgentV1UpdateSpeak": ".types",
+ "AgentV1UpdateSpeakParams": ".requests",
+ "AgentV1UpdateSpeakSpeak": ".types",
+ "AgentV1UpdateSpeakSpeakEndpoint": ".types",
+ "AgentV1UpdateSpeakSpeakEndpointParams": ".requests",
+ "AgentV1UpdateSpeakSpeakParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesia": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram": ".types",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel": ".types",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs": ".types",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId": ".types",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi": ".types",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel": ".types",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice": ".types",
+ "AgentV1UpdateSpeakSpeakProviderParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams": ".requests",
+ "AgentV1UserStartedSpeaking": ".types",
+ "AgentV1UserStartedSpeakingParams": ".requests",
+ "AgentV1Warning": ".types",
+ "AgentV1WarningParams": ".requests",
+ "AgentV1Welcome": ".types",
+ "AgentV1WelcomeParams": ".requests",
+ "settings": ".settings",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +471,223 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["settings"]
+__all__ = [
+ "AgentV1AgentAudioDone",
+ "AgentV1AgentAudioDoneParams",
+ "AgentV1AgentStartedSpeaking",
+ "AgentV1AgentStartedSpeakingParams",
+ "AgentV1AgentThinking",
+ "AgentV1AgentThinkingParams",
+ "AgentV1ConversationText",
+ "AgentV1ConversationTextParams",
+ "AgentV1ConversationTextRole",
+ "AgentV1Error",
+ "AgentV1ErrorParams",
+ "AgentV1FunctionCallRequest",
+ "AgentV1FunctionCallRequestFunctionsItem",
+ "AgentV1FunctionCallRequestFunctionsItemParams",
+ "AgentV1FunctionCallRequestParams",
+ "AgentV1InjectAgentMessage",
+ "AgentV1InjectAgentMessageParams",
+ "AgentV1InjectUserMessage",
+ "AgentV1InjectUserMessageParams",
+ "AgentV1InjectionRefused",
+ "AgentV1InjectionRefusedParams",
+ "AgentV1KeepAlive",
+ "AgentV1KeepAliveParams",
+ "AgentV1PromptUpdated",
+ "AgentV1PromptUpdatedParams",
+ "AgentV1ReceiveFunctionCallResponse",
+ "AgentV1ReceiveFunctionCallResponseParams",
+ "AgentV1SendFunctionCallResponse",
+ "AgentV1SendFunctionCallResponseParams",
+ "AgentV1Settings",
+ "AgentV1SettingsAgent",
+ "AgentV1SettingsAgentContext",
+ "AgentV1SettingsAgentContextMessagesItem",
+ "AgentV1SettingsAgentContextMessagesItemContent",
+ "AgentV1SettingsAgentContextMessagesItemContentParams",
+ "AgentV1SettingsAgentContextMessagesItemContentRole",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams",
+ "AgentV1SettingsAgentContextMessagesItemParams",
+ "AgentV1SettingsAgentContextParams",
+ "AgentV1SettingsAgentListen",
+ "AgentV1SettingsAgentListenParams",
+ "AgentV1SettingsAgentListenProvider",
+ "AgentV1SettingsAgentListenProviderParams",
+ "AgentV1SettingsAgentParams",
+ "AgentV1SettingsAgentSpeak",
+ "AgentV1SettingsAgentSpeakEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesia",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgram",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakItem",
+ "AgentV1SettingsAgentSpeakItemEndpoint",
+ "AgentV1SettingsAgentSpeakItemEndpointParams",
+ "AgentV1SettingsAgentSpeakItemParams",
+ "AgentV1SettingsAgentSpeakItemProvider",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakItemProviderCartesia",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaParams",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgram",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramParams",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabs",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsParams",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAi",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiParams",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakItemProviderParams",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakItemProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakItemProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakItemProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakItemProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakParams",
+ "AgentV1SettingsAgentThink",
+ "AgentV1SettingsAgentThinkContextLength",
+ "AgentV1SettingsAgentThinkContextLengthParams",
+ "AgentV1SettingsAgentThinkEndpoint",
+ "AgentV1SettingsAgentThinkEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItem",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemParams",
+ "AgentV1SettingsAgentThinkParams",
+ "AgentV1SettingsAgentThinkProvider",
+ "AgentV1SettingsAgentThinkProviderCredentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsType",
+ "AgentV1SettingsAgentThinkProviderCredentialsModel",
+ "AgentV1SettingsAgentThinkProviderCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderModel",
+ "AgentV1SettingsAgentThinkProviderModelParams",
+ "AgentV1SettingsAgentThinkProviderParams",
+ "AgentV1SettingsAgentThinkProviderThree",
+ "AgentV1SettingsAgentThinkProviderThreeModel",
+ "AgentV1SettingsAgentThinkProviderThreeParams",
+ "AgentV1SettingsAgentThinkProviderTwo",
+ "AgentV1SettingsAgentThinkProviderTwoModel",
+ "AgentV1SettingsAgentThinkProviderTwoParams",
+ "AgentV1SettingsAgentThinkProviderZero",
+ "AgentV1SettingsAgentThinkProviderZeroModel",
+ "AgentV1SettingsAgentThinkProviderZeroParams",
+ "AgentV1SettingsApplied",
+ "AgentV1SettingsAppliedParams",
+ "AgentV1SettingsAudio",
+ "AgentV1SettingsAudioInput",
+ "AgentV1SettingsAudioInputEncoding",
+ "AgentV1SettingsAudioInputParams",
+ "AgentV1SettingsAudioOutput",
+ "AgentV1SettingsAudioOutputEncoding",
+ "AgentV1SettingsAudioOutputParams",
+ "AgentV1SettingsAudioParams",
+ "AgentV1SettingsFlags",
+ "AgentV1SettingsFlagsParams",
+ "AgentV1SettingsParams",
+ "AgentV1SpeakUpdated",
+ "AgentV1SpeakUpdatedParams",
+ "AgentV1UpdatePrompt",
+ "AgentV1UpdatePromptParams",
+ "AgentV1UpdateSpeak",
+ "AgentV1UpdateSpeakParams",
+ "AgentV1UpdateSpeakSpeak",
+ "AgentV1UpdateSpeakSpeakEndpoint",
+ "AgentV1UpdateSpeakSpeakEndpointParams",
+ "AgentV1UpdateSpeakSpeakParams",
+ "AgentV1UpdateSpeakSpeakProvider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice",
+ "AgentV1UpdateSpeakSpeakProviderParams",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams",
+ "AgentV1UserStartedSpeaking",
+ "AgentV1UserStartedSpeakingParams",
+ "AgentV1Warning",
+ "AgentV1WarningParams",
+ "AgentV1Welcome",
+ "AgentV1WelcomeParams",
+ "settings",
+]
diff --git a/src/deepgram/agent/v1/requests/__init__.py b/src/deepgram/agent/v1/requests/__init__.py
new file mode 100644
index 00000000..b0e8707e
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/__init__.py
@@ -0,0 +1,351 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .agent_v1agent_audio_done import AgentV1AgentAudioDoneParams
+ from .agent_v1agent_started_speaking import AgentV1AgentStartedSpeakingParams
+ from .agent_v1agent_thinking import AgentV1AgentThinkingParams
+ from .agent_v1conversation_text import AgentV1ConversationTextParams
+ from .agent_v1error import AgentV1ErrorParams
+ from .agent_v1function_call_request import AgentV1FunctionCallRequestParams
+ from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItemParams
+ from .agent_v1inject_agent_message import AgentV1InjectAgentMessageParams
+ from .agent_v1inject_user_message import AgentV1InjectUserMessageParams
+ from .agent_v1injection_refused import AgentV1InjectionRefusedParams
+ from .agent_v1keep_alive import AgentV1KeepAliveParams
+ from .agent_v1prompt_updated import AgentV1PromptUpdatedParams
+ from .agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponseParams
+ from .agent_v1send_function_call_response import AgentV1SendFunctionCallResponseParams
+ from .agent_v1settings import AgentV1SettingsParams
+ from .agent_v1settings_agent import AgentV1SettingsAgentParams
+ from .agent_v1settings_agent_context import AgentV1SettingsAgentContextParams
+ from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItemParams
+ from .agent_v1settings_agent_context_messages_item_content import (
+ AgentV1SettingsAgentContextMessagesItemContentParams,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+ )
+ from .agent_v1settings_agent_listen import AgentV1SettingsAgentListenParams
+ from .agent_v1settings_agent_listen_provider import AgentV1SettingsAgentListenProviderParams
+ from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeakParams
+ from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpointParams
+ from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpointParams
+ from .agent_v1settings_agent_speak_endpoint_provider import (
+ AgentV1SettingsAgentSpeakEndpointProviderParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_deepgram import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_open_ai import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams,
+ )
+ from .agent_v1settings_agent_speak_item import AgentV1SettingsAgentSpeakItemParams
+ from .agent_v1settings_agent_speak_item_endpoint import AgentV1SettingsAgentSpeakItemEndpointParams
+ from .agent_v1settings_agent_speak_item_provider import (
+ AgentV1SettingsAgentSpeakItemProviderParams,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAiParams,
+ )
+ from .agent_v1settings_agent_speak_item_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyParams,
+ )
+ from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams,
+ )
+ from .agent_v1settings_agent_speak_item_provider_cartesia import AgentV1SettingsAgentSpeakItemProviderCartesiaParams
+ from .agent_v1settings_agent_speak_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams,
+ )
+ from .agent_v1settings_agent_speak_item_provider_deepgram import AgentV1SettingsAgentSpeakItemProviderDeepgramParams
+ from .agent_v1settings_agent_speak_item_provider_eleven_labs import (
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsParams,
+ )
+ from .agent_v1settings_agent_speak_item_provider_open_ai import AgentV1SettingsAgentSpeakItemProviderOpenAiParams
+ from .agent_v1settings_agent_think import AgentV1SettingsAgentThinkParams
+ from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLengthParams
+ from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpointParams
+ from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItemParams
+ from .agent_v1settings_agent_think_functions_item_endpoint import (
+ AgentV1SettingsAgentThinkFunctionsItemEndpointParams,
+ )
+ from .agent_v1settings_agent_think_provider import AgentV1SettingsAgentThinkProviderParams
+ from .agent_v1settings_agent_think_provider_credentials import AgentV1SettingsAgentThinkProviderCredentialsParams
+ from .agent_v1settings_agent_think_provider_credentials_credentials import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams,
+ )
+ from .agent_v1settings_agent_think_provider_model import AgentV1SettingsAgentThinkProviderModelParams
+ from .agent_v1settings_agent_think_provider_three import AgentV1SettingsAgentThinkProviderThreeParams
+ from .agent_v1settings_agent_think_provider_two import AgentV1SettingsAgentThinkProviderTwoParams
+ from .agent_v1settings_agent_think_provider_zero import AgentV1SettingsAgentThinkProviderZeroParams
+ from .agent_v1settings_applied import AgentV1SettingsAppliedParams
+ from .agent_v1settings_audio import AgentV1SettingsAudioParams
+ from .agent_v1settings_audio_input import AgentV1SettingsAudioInputParams
+ from .agent_v1settings_audio_output import AgentV1SettingsAudioOutputParams
+ from .agent_v1settings_flags import AgentV1SettingsFlagsParams
+ from .agent_v1speak_updated import AgentV1SpeakUpdatedParams
+ from .agent_v1update_prompt import AgentV1UpdatePromptParams
+ from .agent_v1update_speak import AgentV1UpdateSpeakParams
+ from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeakParams
+ from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpointParams
+ from .agent_v1update_speak_speak_provider import (
+ AgentV1UpdateSpeakSpeakProviderParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly import AgentV1UpdateSpeakSpeakProviderAwsPollyParams
+ from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+ )
+ from .agent_v1update_speak_speak_provider_cartesia import AgentV1UpdateSpeakSpeakProviderCartesiaParams
+ from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+ from .agent_v1update_speak_speak_provider_deepgram import AgentV1UpdateSpeakSpeakProviderDeepgramParams
+ from .agent_v1update_speak_speak_provider_eleven_labs import AgentV1UpdateSpeakSpeakProviderElevenLabsParams
+ from .agent_v1update_speak_speak_provider_open_ai import AgentV1UpdateSpeakSpeakProviderOpenAiParams
+ from .agent_v1user_started_speaking import AgentV1UserStartedSpeakingParams
+ from .agent_v1warning import AgentV1WarningParams
+ from .agent_v1welcome import AgentV1WelcomeParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDoneParams": ".agent_v1agent_audio_done",
+ "AgentV1AgentStartedSpeakingParams": ".agent_v1agent_started_speaking",
+ "AgentV1AgentThinkingParams": ".agent_v1agent_thinking",
+ "AgentV1ConversationTextParams": ".agent_v1conversation_text",
+ "AgentV1ErrorParams": ".agent_v1error",
+ "AgentV1FunctionCallRequestFunctionsItemParams": ".agent_v1function_call_request_functions_item",
+ "AgentV1FunctionCallRequestParams": ".agent_v1function_call_request",
+ "AgentV1InjectAgentMessageParams": ".agent_v1inject_agent_message",
+ "AgentV1InjectUserMessageParams": ".agent_v1inject_user_message",
+ "AgentV1InjectionRefusedParams": ".agent_v1injection_refused",
+ "AgentV1KeepAliveParams": ".agent_v1keep_alive",
+ "AgentV1PromptUpdatedParams": ".agent_v1prompt_updated",
+ "AgentV1ReceiveFunctionCallResponseParams": ".agent_v1receive_function_call_response",
+ "AgentV1SendFunctionCallResponseParams": ".agent_v1send_function_call_response",
+ "AgentV1SettingsAgentContextMessagesItemContentParams": ".agent_v1settings_agent_context_messages_item_content",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".agent_v1settings_agent_context_messages_item_function_calls_function_calls_item",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".agent_v1settings_agent_context_messages_item_function_calls",
+ "AgentV1SettingsAgentContextMessagesItemParams": ".agent_v1settings_agent_context_messages_item",
+ "AgentV1SettingsAgentContextParams": ".agent_v1settings_agent_context",
+ "AgentV1SettingsAgentListenParams": ".agent_v1settings_agent_listen",
+ "AgentV1SettingsAgentListenProviderParams": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentParams": ".agent_v1settings_agent",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".agent_v1settings_agent_speak_endpoint_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointParams": ".agent_v1settings_agent_speak_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams": ".agent_v1settings_agent_speak_endpoint_provider_cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams": ".agent_v1settings_agent_speak_endpoint_provider_deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams": ".agent_v1settings_agent_speak_endpoint_provider_eleven_labs",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams": ".agent_v1settings_agent_speak_endpoint_provider_open_ai",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakItemEndpointParams": ".agent_v1settings_agent_speak_item_endpoint",
+ "AgentV1SettingsAgentSpeakItemParams": ".agent_v1settings_agent_speak_item",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams": ".agent_v1settings_agent_speak_item_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyParams": ".agent_v1settings_agent_speak_item_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaParams": ".agent_v1settings_agent_speak_item_provider_cartesia",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams": ".agent_v1settings_agent_speak_item_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramParams": ".agent_v1settings_agent_speak_item_provider_deepgram",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsParams": ".agent_v1settings_agent_speak_item_provider_eleven_labs",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiParams": ".agent_v1settings_agent_speak_item_provider_open_ai",
+ "AgentV1SettingsAgentSpeakItemProviderParams": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_CartesiaParams": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_DeepgramParams": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAiParams": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakParams": ".agent_v1settings_agent_speak",
+ "AgentV1SettingsAgentThinkContextLengthParams": ".agent_v1settings_agent_think_context_length",
+ "AgentV1SettingsAgentThinkEndpointParams": ".agent_v1settings_agent_think_endpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams": ".agent_v1settings_agent_think_functions_item_endpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemParams": ".agent_v1settings_agent_think_functions_item",
+ "AgentV1SettingsAgentThinkParams": ".agent_v1settings_agent_think",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams": ".agent_v1settings_agent_think_provider_credentials_credentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsParams": ".agent_v1settings_agent_think_provider_credentials",
+ "AgentV1SettingsAgentThinkProviderModelParams": ".agent_v1settings_agent_think_provider_model",
+ "AgentV1SettingsAgentThinkProviderParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProviderThreeParams": ".agent_v1settings_agent_think_provider_three",
+ "AgentV1SettingsAgentThinkProviderTwoParams": ".agent_v1settings_agent_think_provider_two",
+ "AgentV1SettingsAgentThinkProviderZeroParams": ".agent_v1settings_agent_think_provider_zero",
+ "AgentV1SettingsAppliedParams": ".agent_v1settings_applied",
+ "AgentV1SettingsAudioInputParams": ".agent_v1settings_audio_input",
+ "AgentV1SettingsAudioOutputParams": ".agent_v1settings_audio_output",
+ "AgentV1SettingsAudioParams": ".agent_v1settings_audio",
+ "AgentV1SettingsFlagsParams": ".agent_v1settings_flags",
+ "AgentV1SettingsParams": ".agent_v1settings",
+ "AgentV1SpeakUpdatedParams": ".agent_v1speak_updated",
+ "AgentV1UpdatePromptParams": ".agent_v1update_prompt",
+ "AgentV1UpdateSpeakParams": ".agent_v1update_speak",
+ "AgentV1UpdateSpeakSpeakEndpointParams": ".agent_v1update_speak_speak_endpoint",
+ "AgentV1UpdateSpeakSpeakParams": ".agent_v1update_speak_speak",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams": ".agent_v1update_speak_speak_provider_aws_polly_credentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams": ".agent_v1update_speak_speak_provider_aws_polly",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams": ".agent_v1update_speak_speak_provider_cartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams": ".agent_v1update_speak_speak_provider_cartesia_voice",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams": ".agent_v1update_speak_speak_provider_deepgram",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams": ".agent_v1update_speak_speak_provider_eleven_labs",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams": ".agent_v1update_speak_speak_provider_open_ai",
+ "AgentV1UpdateSpeakSpeakProviderParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UserStartedSpeakingParams": ".agent_v1user_started_speaking",
+ "AgentV1WarningParams": ".agent_v1warning",
+ "AgentV1WelcomeParams": ".agent_v1welcome",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "AgentV1AgentAudioDoneParams",
+ "AgentV1AgentStartedSpeakingParams",
+ "AgentV1AgentThinkingParams",
+ "AgentV1ConversationTextParams",
+ "AgentV1ErrorParams",
+ "AgentV1FunctionCallRequestFunctionsItemParams",
+ "AgentV1FunctionCallRequestParams",
+ "AgentV1InjectAgentMessageParams",
+ "AgentV1InjectUserMessageParams",
+ "AgentV1InjectionRefusedParams",
+ "AgentV1KeepAliveParams",
+ "AgentV1PromptUpdatedParams",
+ "AgentV1ReceiveFunctionCallResponseParams",
+ "AgentV1SendFunctionCallResponseParams",
+ "AgentV1SettingsAgentContextMessagesItemContentParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams",
+ "AgentV1SettingsAgentContextMessagesItemParams",
+ "AgentV1SettingsAgentContextParams",
+ "AgentV1SettingsAgentListenParams",
+ "AgentV1SettingsAgentListenProviderParams",
+ "AgentV1SettingsAgentParams",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakItemEndpointParams",
+ "AgentV1SettingsAgentSpeakItemParams",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaParams",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramParams",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsParams",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiParams",
+ "AgentV1SettingsAgentSpeakItemProviderParams",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakItemProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakItemProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakParams",
+ "AgentV1SettingsAgentThinkContextLengthParams",
+ "AgentV1SettingsAgentThinkEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemParams",
+ "AgentV1SettingsAgentThinkParams",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderModelParams",
+ "AgentV1SettingsAgentThinkProviderParams",
+ "AgentV1SettingsAgentThinkProviderThreeParams",
+ "AgentV1SettingsAgentThinkProviderTwoParams",
+ "AgentV1SettingsAgentThinkProviderZeroParams",
+ "AgentV1SettingsAppliedParams",
+ "AgentV1SettingsAudioInputParams",
+ "AgentV1SettingsAudioOutputParams",
+ "AgentV1SettingsAudioParams",
+ "AgentV1SettingsFlagsParams",
+ "AgentV1SettingsParams",
+ "AgentV1SpeakUpdatedParams",
+ "AgentV1UpdatePromptParams",
+ "AgentV1UpdateSpeakParams",
+ "AgentV1UpdateSpeakSpeakEndpointParams",
+ "AgentV1UpdateSpeakSpeakParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams",
+ "AgentV1UpdateSpeakSpeakProviderParams",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams",
+ "AgentV1UserStartedSpeakingParams",
+ "AgentV1WarningParams",
+ "AgentV1WelcomeParams",
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py b/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py
new file mode 100644
index 00000000..43b4f013
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1AgentAudioDoneParams(typing_extensions.TypedDict):
+ type: typing.Literal["AgentAudioDone"]
+ """
+ Message type identifier indicating the agent has finished sending audio
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py b/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py
new file mode 100644
index 00000000..39861c94
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1AgentStartedSpeakingParams(typing_extensions.TypedDict):
+ type: typing.Literal["AgentStartedSpeaking"]
+ """
+ Message type identifier for agent started speaking
+ """
+
+ total_latency: float
+ """
+ Seconds from receiving the user's utterance to producing the agent's reply
+ """
+
+ tts_latency: float
+ """
+ The portion of total latency attributable to text-to-speech
+ """
+
+ ttt_latency: float
+ """
+ The portion of total latency attributable to text-to-text (usually an LLM)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py b/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py
new file mode 100644
index 00000000..13434cbc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1AgentThinkingParams(typing_extensions.TypedDict):
+ type: typing.Literal["AgentThinking"]
+ """
+ Message type identifier for agent thinking
+ """
+
+ content: str
+ """
+ The text of the agent's thought process
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1conversation_text.py b/src/deepgram/agent/v1/requests/agent_v1conversation_text.py
new file mode 100644
index 00000000..ea0601e3
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1conversation_text.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1conversation_text_role import AgentV1ConversationTextRole
+
+
+class AgentV1ConversationTextParams(typing_extensions.TypedDict):
+ type: typing.Literal["ConversationText"]
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1ConversationTextRole
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str
+ """
+ The actual statement that was spoken
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1error.py b/src/deepgram/agent/v1/requests/agent_v1error.py
new file mode 100644
index 00000000..23547cb7
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1error.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1ErrorParams(typing_extensions.TypedDict):
+ type: typing.Literal["Error"]
+ """
+ Message type identifier for error responses
+ """
+
+ description: str
+ """
+ A description of what went wrong
+ """
+
+ code: str
+ """
+ Error code identifying the type of error
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1function_call_request.py b/src/deepgram/agent/v1/requests/agent_v1function_call_request.py
new file mode 100644
index 00000000..b00cc6d4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1function_call_request.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItemParams
+
+
+class AgentV1FunctionCallRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["FunctionCallRequest"]
+ """
+ Message type identifier for function call requests
+ """
+
+ functions: typing.Sequence[AgentV1FunctionCallRequestFunctionsItemParams]
+ """
+ Array of functions to be called
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py b/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py
new file mode 100644
index 00000000..bdc26719
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1FunctionCallRequestFunctionsItemParams(typing_extensions.TypedDict):
+ id: str
+ """
+ Unique identifier for the function call
+ """
+
+ name: str
+ """
+ The name of the function to call
+ """
+
+ arguments: str
+ """
+ JSON string containing the function arguments
+ """
+
+ client_side: bool
+ """
+ Whether the function should be executed client-side
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py b/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py
new file mode 100644
index 00000000..8fb718bd
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1InjectAgentMessageParams(typing_extensions.TypedDict):
+ type: typing.Literal["InjectAgentMessage"]
+ """
+ Message type identifier for injecting an agent message
+ """
+
+ message: str
+ """
+ The statement that the agent should say
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py b/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py
new file mode 100644
index 00000000..86583a81
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1InjectUserMessageParams(typing_extensions.TypedDict):
+ type: typing.Literal["InjectUserMessage"]
+ """
+ Message type identifier for injecting a user message
+ """
+
+ content: str
+ """
+ The specific phrase or statement the agent should respond to
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1injection_refused.py b/src/deepgram/agent/v1/requests/agent_v1injection_refused.py
new file mode 100644
index 00000000..e19f3241
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1injection_refused.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1InjectionRefusedParams(typing_extensions.TypedDict):
+ type: typing.Literal["InjectionRefused"]
+ """
+ Message type identifier for injection refused
+ """
+
+ message: str
+ """
+ Details about why the injection was refused
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1keep_alive.py b/src/deepgram/agent/v1/requests/agent_v1keep_alive.py
new file mode 100644
index 00000000..125eb8ae
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1keep_alive.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1KeepAliveParams(typing_extensions.TypedDict):
+ """
+ Send a control message to the agent
+ """
+
+ type: typing.Literal["KeepAlive"]
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py b/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py
new file mode 100644
index 00000000..40d5a426
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1PromptUpdatedParams(typing_extensions.TypedDict):
+ type: typing.Literal["PromptUpdated"]
+ """
+ Message type identifier for prompt update confirmation
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py b/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py
new file mode 100644
index 00000000..05f8050b
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1ReceiveFunctionCallResponseParams(typing_extensions.TypedDict):
+ """
+ Function call response message used bidirectionally:
+
+ β’ **Client β Server**: Response after client executes a function
+ marked as client_side: true
+ β’ **Server β Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"]
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing_extensions.NotRequired[str]
+ """
+ The unique identifier for the function call.
+
+ β’ **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ β’ **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str
+ """
+ The name of the function being called
+ """
+
+ content: str
+ """
+ The content or result of the function call
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py b/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py
new file mode 100644
index 00000000..765b6f7c
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SendFunctionCallResponseParams(typing_extensions.TypedDict):
+ """
+ Function call response message used bidirectionally:
+
+ β’ **Client β Server**: Response after client executes a function
+ marked as client_side: true
+ β’ **Server β Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"]
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing_extensions.NotRequired[str]
+ """
+ The unique identifier for the function call.
+
+ β’ **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ β’ **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str
+ """
+ The name of the function being called
+ """
+
+ content: str
+ """
+ The content or result of the function call
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings.py b/src/deepgram/agent/v1/requests/agent_v1settings.py
new file mode 100644
index 00000000..2f748dcd
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent import AgentV1SettingsAgentParams
+from .agent_v1settings_audio import AgentV1SettingsAudioParams
+from .agent_v1settings_flags import AgentV1SettingsFlagsParams
+
+
+class AgentV1SettingsParams(typing_extensions.TypedDict):
+ type: typing.Literal["Settings"]
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Tags to associate with the request
+ """
+
+ experimental: typing_extensions.NotRequired[bool]
+ """
+ To enable experimental features
+ """
+
+ flags: typing_extensions.NotRequired[AgentV1SettingsFlagsParams]
+ mip_opt_out: typing_extensions.NotRequired[bool]
+ """
+ To opt out of Deepgram Model Improvement Program
+ """
+
+ audio: AgentV1SettingsAudioParams
+ agent: AgentV1SettingsAgentParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent.py
new file mode 100644
index 00000000..c048a616
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_context import AgentV1SettingsAgentContextParams
+from .agent_v1settings_agent_listen import AgentV1SettingsAgentListenParams
+from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeakParams
+from .agent_v1settings_agent_think import AgentV1SettingsAgentThinkParams
+
+
+class AgentV1SettingsAgentParams(typing_extensions.TypedDict):
+ language: typing_extensions.NotRequired[str]
+ """
+ Agent language
+ """
+
+ context: typing_extensions.NotRequired[AgentV1SettingsAgentContextParams]
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ listen: typing_extensions.NotRequired[AgentV1SettingsAgentListenParams]
+ think: typing_extensions.NotRequired[AgentV1SettingsAgentThinkParams]
+ speak: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakParams]
+ greeting: typing_extensions.NotRequired[str]
+ """
+ Optional message that agent will speak at the start
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py
new file mode 100644
index 00000000..a27f848a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItemParams
+
+
+class AgentV1SettingsAgentContextParams(typing_extensions.TypedDict):
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[AgentV1SettingsAgentContextMessagesItemParams]]
+ """
+ Conversation history as a list of messages and function calls
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py
new file mode 100644
index 00000000..cf31d658
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_context_messages_item_content import AgentV1SettingsAgentContextMessagesItemContentParams
+from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+)
+
+AgentV1SettingsAgentContextMessagesItemParams = typing.Union[
+ AgentV1SettingsAgentContextMessagesItemContentParams, AgentV1SettingsAgentContextMessagesItemFunctionCallsParams
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py
new file mode 100644
index 00000000..1a541ffc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_context_messages_item_content_role import (
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemContentParams(typing_extensions.TypedDict):
+ """
+ Conversation text as part of the conversation history
+ """
+
+ type: typing.Literal["History"]
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1SettingsAgentContextMessagesItemContentRole
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str
+ """
+ The actual statement that was spoken
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py
new file mode 100644
index 00000000..cdc5733c
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCallsParams(typing_extensions.TypedDict):
+ """
+ Client-side or server-side function call request and response as part of the conversation history
+ """
+
+ type: typing.Literal["History"]
+ function_calls: typing.Sequence[AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams]
+ """
+ List of function call objects
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
new file mode 100644
index 00000000..9efeb23e
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams(typing_extensions.TypedDict):
+ id: str
+ """
+ Unique identifier for the function call
+ """
+
+ name: str
+ """
+ Name of the function called
+ """
+
+ client_side: bool
+ """
+ Indicates if the call was client-side or server-side
+ """
+
+ arguments: str
+ """
+ Arguments passed to the function
+ """
+
+ response: str
+ """
+ Response from the function call
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen.py
new file mode 100644
index 00000000..3eb2aa41
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_listen_provider import AgentV1SettingsAgentListenProviderParams
+
+
+class AgentV1SettingsAgentListenParams(typing_extensions.TypedDict):
+ provider: typing_extensions.NotRequired[AgentV1SettingsAgentListenProviderParams]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py
new file mode 100644
index 00000000..ad746f50
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentListenProviderParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ """
+ Provider type for speech-to-text
+ """
+
+ model: typing_extensions.NotRequired[str]
+ """
+ Model to use for speech to text
+ """
+
+ keyterms: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Prompt key-term recognition (nova-3 'en' only)
+ """
+
+ smart_format: typing_extensions.NotRequired[bool]
+ """
+ Applies smart formatting to improve transcript readability (Deepgram providers only)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py
new file mode 100644
index 00000000..19f7c7df
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpointParams
+from .agent_v1settings_agent_speak_item import AgentV1SettingsAgentSpeakItemParams
+
+AgentV1SettingsAgentSpeakParams = typing.Union[
+ AgentV1SettingsAgentSpeakEndpointParams, typing.Sequence[AgentV1SettingsAgentSpeakItemParams]
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py
new file mode 100644
index 00000000..d90614be
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py
@@ -0,0 +1,14 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpointParams
+from .agent_v1settings_agent_speak_endpoint_provider import AgentV1SettingsAgentSpeakEndpointProviderParams
+
+
+class AgentV1SettingsAgentSpeakEndpointParams(typing_extensions.TypedDict):
+ provider: AgentV1SettingsAgentSpeakEndpointProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakEndpointEndpointParams]
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py
new file mode 100644
index 00000000..3bc9c86f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakEndpointEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py
new file mode 100644
index 00000000..bbe15771
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py
@@ -0,0 +1,75 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ model: AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams(typing_extensions.TypedDict):
+ type: typing.Literal["eleven_labs"]
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId
+ language_code: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams(typing_extensions.TypedDict):
+ type: typing.Literal["cartesia"]
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_polly"]
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice
+ language_code: str
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams
+
+
+AgentV1SettingsAgentSpeakEndpointProviderParams = typing.Union[
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
new file mode 100644
index 00000000..58af5935
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams(typing_extensions.TypedDict):
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice
+ """
+ AWS Polly voice name
+ """
+
+ language_code: str
+ """
+ Language code (e.g., "en-US")
+ """
+
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..97ad74b1
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams(typing_extensions.TypedDict):
+ type: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing_extensions.NotRequired[str]
+ """
+ Required for STS only
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia.py
new file mode 100644
index 00000000..e2fe184f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderCartesiaParams(typing_extensions.TypedDict):
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+ """
+ Cartesia language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
new file mode 100644
index 00000000..51bd279a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams(typing_extensions.TypedDict):
+ mode: str
+ """
+ Cartesia voice mode
+ """
+
+ id: str
+ """
+ Cartesia voice ID
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_deepgram.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_deepgram.py
new file mode 100644
index 00000000..8ac99acc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_deepgram.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderDeepgramParams(typing_extensions.TypedDict):
+ model: AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel
+ """
+ Deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_eleven_labs.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_eleven_labs.py
new file mode 100644
index 00000000..0dd894f2
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_eleven_labs.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderElevenLabsParams(typing_extensions.TypedDict):
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId
+ """
+ Eleven Labs model ID
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Eleven Labs optional language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py
new file mode 100644
index 00000000..7e4226b8
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams(typing_extensions.TypedDict):
+ model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice
+ """
+ OpenAI voice
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item.py
new file mode 100644
index 00000000..14a7707f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item.py
@@ -0,0 +1,14 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_speak_item_endpoint import AgentV1SettingsAgentSpeakItemEndpointParams
+from .agent_v1settings_agent_speak_item_provider import AgentV1SettingsAgentSpeakItemProviderParams
+
+
+class AgentV1SettingsAgentSpeakItemParams(typing_extensions.TypedDict):
+ provider: AgentV1SettingsAgentSpeakItemProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakItemEndpointParams]
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_endpoint.py
new file mode 100644
index 00000000..c1851824
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_endpoint.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakItemEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider.py
new file mode 100644
index 00000000..97afd28b
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider.py
@@ -0,0 +1,75 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakItemProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakItemProviderOpenAiVoice,
+)
+from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams,
+)
+from .agent_v1settings_agent_speak_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProvider_DeepgramParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ model: AgentV1SettingsAgentSpeakItemProviderDeepgramModel
+
+
+class AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams(typing_extensions.TypedDict):
+ type: typing.Literal["eleven_labs"]
+ model_id: AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId
+ language_code: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakItemProvider_CartesiaParams(typing_extensions.TypedDict):
+ type: typing.Literal["cartesia"]
+ model_id: AgentV1SettingsAgentSpeakItemProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakItemProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ model: AgentV1SettingsAgentSpeakItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakItemProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_polly"]
+ voice: AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice
+ language_code: str
+ engine: AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams
+
+
+AgentV1SettingsAgentSpeakItemProviderParams = typing.Union[
+ AgentV1SettingsAgentSpeakItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPollyParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_aws_polly.py
new file mode 100644
index 00000000..9ad0d3cd
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_aws_polly.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderAwsPollyParams(typing_extensions.TypedDict):
+ voice: AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice
+ """
+ AWS Polly voice name
+ """
+
+ language_code: str
+ """
+ Language code (e.g., "en-US")
+ """
+
+ engine: AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..0abde626
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_aws_polly_credentials.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsParams(typing_extensions.TypedDict):
+ type: AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing_extensions.NotRequired[str]
+ """
+ Required for STS only
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_cartesia.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_cartesia.py
new file mode 100644
index 00000000..e09e6c0a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_cartesia.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderCartesiaParams(typing_extensions.TypedDict):
+ model_id: AgentV1SettingsAgentSpeakItemProviderCartesiaModelId
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+ """
+ Cartesia language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_cartesia_voice.py
new file mode 100644
index 00000000..fd595ae2
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_cartesia_voice.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakItemProviderCartesiaVoiceParams(typing_extensions.TypedDict):
+ mode: str
+ """
+ Cartesia voice mode
+ """
+
+ id: str
+ """
+ Cartesia voice ID
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_deepgram.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_deepgram.py
new file mode 100644
index 00000000..af59d2d2
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_deepgram.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderDeepgramParams(typing_extensions.TypedDict):
+ model: AgentV1SettingsAgentSpeakItemProviderDeepgramModel
+ """
+ Deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_eleven_labs.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_eleven_labs.py
new file mode 100644
index 00000000..007161e4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_eleven_labs.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderElevenLabsParams(typing_extensions.TypedDict):
+ model_id: AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId
+ """
+ Eleven Labs model ID
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Eleven Labs optional language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_open_ai.py
new file mode 100644
index 00000000..d2c8bf31
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_item_provider_open_ai.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakItemProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakItemProviderOpenAiVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderOpenAiParams(typing_extensions.TypedDict):
+ model: AgentV1SettingsAgentSpeakItemProviderOpenAiModel
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1SettingsAgentSpeakItemProviderOpenAiVoice
+ """
+ OpenAI voice
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py
new file mode 100644
index 00000000..2d5d22c2
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLengthParams
+from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpointParams
+from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItemParams
+from .agent_v1settings_agent_think_provider import AgentV1SettingsAgentThinkProviderParams
+
+
+class AgentV1SettingsAgentThinkParams(typing_extensions.TypedDict):
+ provider: AgentV1SettingsAgentThinkProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentThinkEndpointParams]
+ """
+ Optional for non-Deepgram LLM providers. When present, must include url field and headers object
+ """
+
+ functions: typing_extensions.NotRequired[typing.Sequence[AgentV1SettingsAgentThinkFunctionsItemParams]]
+ prompt: typing_extensions.NotRequired[str]
+ context_length: typing_extensions.NotRequired[AgentV1SettingsAgentThinkContextLengthParams]
+ """
+ Specifies the number of characters retained in context between user messages, agent responses, and function calls. This setting is only configurable when a custom think endpoint is used
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_context_length.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_context_length.py
new file mode 100644
index 00000000..f96cd4c3
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_context_length.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkContextLengthParams = typing.Union[typing.Literal["max"], float]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_endpoint.py
new file mode 100644
index 00000000..396bbc07
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_endpoint.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentThinkEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional for non-Deepgram LLM providers. When present, must include url field and headers object
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom LLM endpoint URL
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
+ """
+ Custom headers for the endpoint
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item.py
new file mode 100644
index 00000000..7ddb30e9
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_think_functions_item_endpoint import AgentV1SettingsAgentThinkFunctionsItemEndpointParams
+
+
+class AgentV1SettingsAgentThinkFunctionsItemParams(typing_extensions.TypedDict):
+ name: typing_extensions.NotRequired[str]
+ """
+ Function name
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Function description
+ """
+
+ parameters: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Function parameters
+ """
+
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentThinkFunctionsItemEndpointParams]
+ """
+ The Function endpoint to call. if not passed, function is called client-side
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item_endpoint.py
new file mode 100644
index 00000000..19e76bdc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item_endpoint.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentThinkFunctionsItemEndpointParams(typing_extensions.TypedDict):
+ """
+ The Function endpoint to call. if not passed, function is called client-side
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Endpoint URL
+ """
+
+ method: typing_extensions.NotRequired[str]
+ """
+ HTTP method
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider.py
new file mode 100644
index 00000000..76d1a6a4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_think_provider_credentials import AgentV1SettingsAgentThinkProviderCredentialsParams
+from .agent_v1settings_agent_think_provider_model import AgentV1SettingsAgentThinkProviderModelParams
+from .agent_v1settings_agent_think_provider_three import AgentV1SettingsAgentThinkProviderThreeParams
+from .agent_v1settings_agent_think_provider_two import AgentV1SettingsAgentThinkProviderTwoParams
+from .agent_v1settings_agent_think_provider_zero import AgentV1SettingsAgentThinkProviderZeroParams
+
+AgentV1SettingsAgentThinkProviderParams = typing.Union[
+ AgentV1SettingsAgentThinkProviderZeroParams,
+ AgentV1SettingsAgentThinkProviderCredentialsParams,
+ AgentV1SettingsAgentThinkProviderTwoParams,
+ AgentV1SettingsAgentThinkProviderThreeParams,
+ AgentV1SettingsAgentThinkProviderModelParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_credentials.py
new file mode 100644
index 00000000..d17d2bf2
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_credentials_model import (
+ AgentV1SettingsAgentThinkProviderCredentialsModel,
+)
+from .agent_v1settings_agent_think_provider_credentials_credentials import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentThinkProviderCredentialsParams(typing_extensions.TypedDict):
+ type: typing_extensions.NotRequired[typing.Literal["aws_bedrock"]]
+ model: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderCredentialsModel]
+ """
+ AWS Bedrock model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ AWS Bedrock temperature (0-2)
+ """
+
+ credentials: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams]
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_credentials_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_credentials_credentials.py
new file mode 100644
index 00000000..7534b574
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_credentials_credentials.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_credentials_credentials_type import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsType,
+)
+
+
+class AgentV1SettingsAgentThinkProviderCredentialsCredentialsParams(typing_extensions.TypedDict):
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ type: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderCredentialsCredentialsType]
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ region: typing_extensions.NotRequired[str]
+ """
+ AWS region
+ """
+
+ access_key_id: typing_extensions.NotRequired[str]
+ """
+ AWS access key
+ """
+
+ secret_access_key: typing_extensions.NotRequired[str]
+ """
+ AWS secret access key
+ """
+
+ session_token: typing_extensions.NotRequired[str]
+ """
+ AWS session token (required for STS only)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_model.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_model.py
new file mode 100644
index 00000000..c2837c7f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentThinkProviderModelParams(typing_extensions.TypedDict):
+ type: typing_extensions.NotRequired[typing.Literal["groq"]]
+ model: typing_extensions.NotRequired[typing.Literal["openai/gpt-oss-20b"]]
+ """
+ Groq model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ Groq temperature (0-2)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_three.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_three.py
new file mode 100644
index 00000000..769453ad
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_three.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_three_model import AgentV1SettingsAgentThinkProviderThreeModel
+
+
+class AgentV1SettingsAgentThinkProviderThreeParams(typing_extensions.TypedDict):
+ type: typing_extensions.NotRequired[typing.Literal["google"]]
+ model: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderThreeModel]
+ """
+ Google model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ Google temperature (0-2)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_two.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_two.py
new file mode 100644
index 00000000..4be24295
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_two.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_two_model import AgentV1SettingsAgentThinkProviderTwoModel
+
+
+class AgentV1SettingsAgentThinkProviderTwoParams(typing_extensions.TypedDict):
+ type: typing_extensions.NotRequired[typing.Literal["anthropic"]]
+ model: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderTwoModel]
+ """
+ Anthropic model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ Anthropic temperature (0-1)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_zero.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_zero.py
new file mode 100644
index 00000000..15419372
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_zero.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_zero_model import AgentV1SettingsAgentThinkProviderZeroModel
+
+
+class AgentV1SettingsAgentThinkProviderZeroParams(typing_extensions.TypedDict):
+ type: typing_extensions.NotRequired[typing.Literal["open_ai"]]
+ model: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderZeroModel]
+ """
+ OpenAI model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ OpenAI temperature (0-2)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_applied.py b/src/deepgram/agent/v1/requests/agent_v1settings_applied.py
new file mode 100644
index 00000000..32bca304
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_applied.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAppliedParams(typing_extensions.TypedDict):
+ type: typing.Literal["SettingsApplied"]
+ """
+ Message type identifier for settings applied confirmation
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_audio.py b/src/deepgram/agent/v1/requests/agent_v1settings_audio.py
new file mode 100644
index 00000000..0c09d60f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_audio.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_audio_input import AgentV1SettingsAudioInputParams
+from .agent_v1settings_audio_output import AgentV1SettingsAudioOutputParams
+
+
+class AgentV1SettingsAudioParams(typing_extensions.TypedDict):
+ input: typing_extensions.NotRequired[AgentV1SettingsAudioInputParams]
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ output: typing_extensions.NotRequired[AgentV1SettingsAudioOutputParams]
+ """
+ Audio output configuration settings
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_audio_input.py b/src/deepgram/agent/v1/requests/agent_v1settings_audio_input.py
new file mode 100644
index 00000000..91931180
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_audio_input.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding
+
+
+class AgentV1SettingsAudioInputParams(typing_extensions.TypedDict):
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ encoding: AgentV1SettingsAudioInputEncoding
+ """
+ Audio encoding format
+ """
+
+ sample_rate: float
+ """
+ Sample rate in Hz. Common values are 16000, 24000, 44100, 48000
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_audio_output.py b/src/deepgram/agent/v1/requests/agent_v1settings_audio_output.py
new file mode 100644
index 00000000..32273699
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_audio_output.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding
+
+
+class AgentV1SettingsAudioOutputParams(typing_extensions.TypedDict):
+ """
+ Audio output configuration settings
+ """
+
+ encoding: typing_extensions.NotRequired[AgentV1SettingsAudioOutputEncoding]
+ """
+ Audio encoding format for streaming TTS output
+ """
+
+ sample_rate: typing_extensions.NotRequired[float]
+ """
+ Sample rate in Hz
+ """
+
+ bitrate: typing_extensions.NotRequired[float]
+ """
+ Audio bitrate in bits per second
+ """
+
+ container: typing_extensions.NotRequired[str]
+ """
+ Audio container format. If omitted, defaults to 'none'
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_flags.py b/src/deepgram/agent/v1/requests/agent_v1settings_flags.py
new file mode 100644
index 00000000..737233a4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_flags.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsFlagsParams(typing_extensions.TypedDict):
+ history: typing_extensions.NotRequired[bool]
+ """
+ Enable or disable history message reporting
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1speak_updated.py b/src/deepgram/agent/v1/requests/agent_v1speak_updated.py
new file mode 100644
index 00000000..908d6639
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1speak_updated.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SpeakUpdatedParams(typing_extensions.TypedDict):
+ type: typing.Literal["SpeakUpdated"]
+ """
+ Message type identifier for speak update confirmation
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_prompt.py b/src/deepgram/agent/v1/requests/agent_v1update_prompt.py
new file mode 100644
index 00000000..8f363a56
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_prompt.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1UpdatePromptParams(typing_extensions.TypedDict):
+ type: typing.Literal["UpdatePrompt"]
+ """
+ Message type identifier for prompt update request
+ """
+
+ prompt: str
+ """
+ The new system prompt to be used by the agent
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak.py b/src/deepgram/agent/v1/requests/agent_v1update_speak.py
new file mode 100644
index 00000000..b86d1240
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeakParams
+
+
+class AgentV1UpdateSpeakParams(typing_extensions.TypedDict):
+ type: typing.Literal["UpdateSpeak"]
+ """
+ Message type identifier for updating the speak model
+ """
+
+ speak: AgentV1UpdateSpeakSpeakParams
+ """
+ Configuration for the speak model. Optional, defaults to latest deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py
new file mode 100644
index 00000000..16a16e01
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpointParams
+from .agent_v1update_speak_speak_provider import AgentV1UpdateSpeakSpeakProviderParams
+
+
+class AgentV1UpdateSpeakSpeakParams(typing_extensions.TypedDict):
+ """
+ Configuration for the speak model. Optional, defaults to latest deepgram TTS model
+ """
+
+ provider: AgentV1UpdateSpeakSpeakProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointParams]
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py
new file mode 100644
index 00000000..43cdb3af
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1UpdateSpeakSpeakEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider.py
new file mode 100644
index 00000000..5e9804ef
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider.py
@@ -0,0 +1,61 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from ..types.agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+from ..types.agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from ..types.agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+from ..types.agent_v1update_speak_speak_provider_eleven_labs_model_id import (
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+)
+from ..types.agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from ..types.agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+)
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+
+
+class AgentV1UpdateSpeakSpeakProvider_DeepgramParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+
+class AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams(typing_extensions.TypedDict):
+ type: typing.Literal["eleven_labs"]
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+ language_code: typing_extensions.NotRequired[str]
+
+
+class AgentV1UpdateSpeakSpeakProvider_CartesiaParams(typing_extensions.TypedDict):
+ type: typing.Literal["cartesia"]
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+
+
+class AgentV1UpdateSpeakSpeakProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProvider_AwsPollyParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_polly"]
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ language_code: str
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams
+
+
+AgentV1UpdateSpeakSpeakProviderParams = typing.Union[
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly.py
new file mode 100644
index 00000000..bd1ee63d
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from ..types.agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPollyParams(typing_extensions.TypedDict):
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ """
+ AWS Polly voice name
+ """
+
+ language_code: str
+ """
+ Language code (e.g., "en-US")
+ """
+
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..ff643278
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly_credentials.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_aws_polly_credentials_type import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams(typing_extensions.TypedDict):
+ type: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing_extensions.NotRequired[str]
+ """
+ Required for STS only
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia.py
new file mode 100644
index 00000000..58f9727d
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesiaParams(typing_extensions.TypedDict):
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+ """
+ Cartesia language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia_voice.py
new file mode 100644
index 00000000..3ff2e8be
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia_voice.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams(typing_extensions.TypedDict):
+ mode: str
+ """
+ Cartesia voice mode
+ """
+
+ id: str
+ """
+ Cartesia voice ID
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_deepgram.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_deepgram.py
new file mode 100644
index 00000000..5252dd10
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_deepgram.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+
+class AgentV1UpdateSpeakSpeakProviderDeepgramParams(typing_extensions.TypedDict):
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel
+ """
+ Deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_eleven_labs.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_eleven_labs.py
new file mode 100644
index 00000000..d7a1320b
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_eleven_labs.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_eleven_labs_model_id import (
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderElevenLabsParams(typing_extensions.TypedDict):
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+ """
+ Eleven Labs model ID
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Eleven Labs optional language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_open_ai.py
new file mode 100644
index 00000000..9994267f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_open_ai.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from ..types.agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderOpenAiParams(typing_extensions.TypedDict):
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+ """
+ OpenAI voice
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py b/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py
new file mode 100644
index 00000000..c883119c
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1UserStartedSpeakingParams(typing_extensions.TypedDict):
+ type: typing.Literal["UserStartedSpeaking"]
+ """
+ Message type identifier indicating that the user has begun speaking
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1warning.py b/src/deepgram/agent/v1/requests/agent_v1warning.py
new file mode 100644
index 00000000..f1e75051
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1warning.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1WarningParams(typing_extensions.TypedDict):
+ """
+ Notifies the client of non-fatal errors or warnings
+ """
+
+ type: typing.Literal["Warning"]
+ """
+ Message type identifier for warnings
+ """
+
+ description: str
+ """
+ Description of the warning
+ """
+
+ code: str
+ """
+ Warning code identifier
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1welcome.py b/src/deepgram/agent/v1/requests/agent_v1welcome.py
new file mode 100644
index 00000000..5168a4f0
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1welcome.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1WelcomeParams(typing_extensions.TypedDict):
+ type: typing.Literal["Welcome"]
+ """
+ Message type identifier for welcome message
+ """
+
+ request_id: str
+ """
+ Unique identifier for the request
+ """
diff --git a/src/deepgram/agent/v1/socket_client.py b/src/deepgram/agent/v1/socket_client.py
index f76fc9e4..a59856c5 100644
--- a/src/deepgram/agent/v1/socket_client.py
+++ b/src/deepgram/agent/v1/socket_client.py
@@ -1,5 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
-# Enhanced with binary message support, comprehensive socket types, and send methods.
import json
import typing
@@ -9,62 +8,49 @@
import websockets.sync.connection as websockets_sync_connection
from ...core.events import EventEmitterMixin, EventType
from ...core.pydantic_utilities import parse_obj_as
+from .types.agent_v1agent_audio_done import AgentV1AgentAudioDone
+from .types.agent_v1agent_started_speaking import AgentV1AgentStartedSpeaking
+from .types.agent_v1agent_thinking import AgentV1AgentThinking
+from .types.agent_v1conversation_text import AgentV1ConversationText
+from .types.agent_v1error import AgentV1Error
+from .types.agent_v1function_call_request import AgentV1FunctionCallRequest
+from .types.agent_v1inject_agent_message import AgentV1InjectAgentMessage
+from .types.agent_v1inject_user_message import AgentV1InjectUserMessage
+from .types.agent_v1injection_refused import AgentV1InjectionRefused
+from .types.agent_v1keep_alive import AgentV1KeepAlive
+from .types.agent_v1prompt_updated import AgentV1PromptUpdated
+from .types.agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponse
+from .types.agent_v1send_function_call_response import AgentV1SendFunctionCallResponse
+from .types.agent_v1settings import AgentV1Settings
+from .types.agent_v1settings_applied import AgentV1SettingsApplied
+from .types.agent_v1speak_updated import AgentV1SpeakUpdated
+from .types.agent_v1update_prompt import AgentV1UpdatePrompt
+from .types.agent_v1update_speak import AgentV1UpdateSpeak
+from .types.agent_v1user_started_speaking import AgentV1UserStartedSpeaking
+from .types.agent_v1warning import AgentV1Warning
+from .types.agent_v1welcome import AgentV1Welcome
try:
from websockets.legacy.client import WebSocketClientProtocol # type: ignore
except ImportError:
from websockets import WebSocketClientProtocol # type: ignore
-# Socket message types
-from ...extensions.types.sockets import (
- AgentV1AgentAudioDoneEvent,
- AgentV1AgentStartedSpeakingEvent,
- AgentV1AgentThinkingEvent,
- AgentV1AudioChunkEvent,
- AgentV1ControlMessage,
- AgentV1ConversationTextEvent,
- AgentV1ErrorEvent,
- AgentV1FunctionCallRequestEvent,
- AgentV1FunctionCallResponseMessage,
- AgentV1HistoryFunctionCalls,
- AgentV1HistoryMessage,
- AgentV1InjectAgentMessageMessage,
- AgentV1InjectionRefusedEvent,
- AgentV1InjectUserMessageMessage,
- AgentV1MediaMessage,
- AgentV1PromptUpdatedEvent,
- AgentV1SettingsAppliedEvent,
- # Send message types
- AgentV1SettingsMessage,
- AgentV1SpeakUpdatedEvent,
- AgentV1UpdatePromptMessage,
- AgentV1UpdateSpeakMessage,
- AgentV1UserStartedSpeakingEvent,
- AgentV1WarningEvent,
- # Receive event types
- AgentV1WelcomeMessage,
-)
-
-# Response union type with binary support
V1SocketClientResponse = typing.Union[
- AgentV1WelcomeMessage,
- AgentV1SettingsAppliedEvent,
- AgentV1HistoryMessage,
- AgentV1HistoryFunctionCalls,
- AgentV1ConversationTextEvent,
- AgentV1UserStartedSpeakingEvent,
- AgentV1AgentThinkingEvent,
- AgentV1FunctionCallRequestEvent,
- AgentV1FunctionCallResponseMessage, # Bidirectional: Server β Client function responses
- AgentV1AgentStartedSpeakingEvent,
- AgentV1AgentAudioDoneEvent,
- AgentV1PromptUpdatedEvent,
- AgentV1SpeakUpdatedEvent,
- AgentV1InjectionRefusedEvent,
- AgentV1ErrorEvent,
- AgentV1WarningEvent,
- AgentV1AudioChunkEvent, # Binary audio data
- bytes, # Raw binary audio chunks
+ AgentV1ReceiveFunctionCallResponse,
+ AgentV1PromptUpdated,
+ AgentV1SpeakUpdated,
+ AgentV1InjectionRefused,
+ AgentV1Welcome,
+ AgentV1SettingsApplied,
+ AgentV1ConversationText,
+ AgentV1UserStartedSpeaking,
+ AgentV1AgentThinking,
+ AgentV1FunctionCallRequest,
+ AgentV1AgentStartedSpeaking,
+ AgentV1AgentAudioDone,
+ AgentV1Error,
+ AgentV1Warning,
+ str,
]
@@ -73,114 +59,108 @@ def __init__(self, *, websocket: WebSocketClientProtocol):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is for audio chunks)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
async def __aiter__(self):
async for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V1SocketClientResponse, json.loads(message)) # type: ignore
async def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages for Agent conversations.
Emits events in the following order:
- EventType.OPEN when connection is established
- - EventType.MESSAGE for each message received (binary audio or JSON events)
+ - EventType.MESSAGE for each message received
- EventType.ERROR if an error occurs
- EventType.CLOSE when connection is closed
"""
await self._emit_async(EventType.OPEN, None)
try:
async for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
await self._emit_async(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- await self._emit_async(EventType.ERROR, exc)
+ await self._emit_async(EventType.ERROR, exc)
finally:
await self._emit_async(EventType.CLOSE, None)
- async def recv(self) -> V1SocketClientResponse:
+ async def send_agent_v_1_settings(self, message: AgentV1Settings) -> None:
"""
- Receive a message from the websocket connection.
- Handles both binary and JSON messages.
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1Settings.
"""
- data = await self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ await self._send_model(message)
- async def _send(self, data: typing.Any) -> None:
+ async def send_agent_v_1_update_speak(self, message: AgentV1UpdateSpeak) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1UpdateSpeak.
"""
- if isinstance(data, (bytes, bytearray)):
- await self._websocket.send(data)
- elif isinstance(data, dict):
- await self._websocket.send(json.dumps(data))
- else:
- await self._websocket.send(data)
+ await self._send_model(message)
- async def _send_model(self, data: typing.Any) -> None:
+ async def send_agent_v_1_inject_user_message(self, message: AgentV1InjectUserMessage) -> None:
"""
- Send a Pydantic model to the websocket connection.
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1InjectUserMessage.
"""
- await self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- async def send_settings(self, message: AgentV1SettingsMessage) -> None:
- """Send initial agent configuration settings."""
await self._send_model(message)
- async def send_control(self, message: AgentV1ControlMessage) -> None:
- """Send a control message (keep_alive, etc.)."""
+ async def send_agent_v_1_inject_agent_message(self, message: AgentV1InjectAgentMessage) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1InjectAgentMessage.
+ """
await self._send_model(message)
- async def send_update_speak(self, message: AgentV1UpdateSpeakMessage) -> None:
- """Update the agent's speech synthesis settings."""
+ async def send_agent_v_1_send_function_call_response(self, message: AgentV1SendFunctionCallResponse) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1SendFunctionCallResponse.
+ """
await self._send_model(message)
- async def send_update_prompt(self, message: AgentV1UpdatePromptMessage) -> None:
- """Update the agent's system prompt."""
+ async def send_agent_v_1_keep_alive(self, message: AgentV1KeepAlive) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1KeepAlive.
+ """
await self._send_model(message)
- async def send_inject_user_message(self, message: AgentV1InjectUserMessageMessage) -> None:
- """Inject a user message into the conversation."""
+ async def send_agent_v_1_update_prompt(self, message: AgentV1UpdatePrompt) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1UpdatePrompt.
+ """
await self._send_model(message)
- async def send_inject_agent_message(self, message: AgentV1InjectAgentMessageMessage) -> None:
- """Inject an agent message into the conversation."""
+ async def send_agent_v_1_media(self, message: str) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a str.
+ """
await self._send_model(message)
- async def send_function_call_response(self, message: AgentV1FunctionCallResponseMessage) -> None:
- """Send the result of a function call back to the agent."""
- await self._send_model(message)
+ async def recv(self) -> V1SocketClientResponse:
+ """
+ Receive a message from the websocket connection.
+ """
+ data = await self._websocket.recv()
+ json_data = json.loads(data)
+ return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
+
+ async def _send(self, data: typing.Any) -> None:
+ """
+ Send a message to the websocket connection.
+ """
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ await self._websocket.send(data)
- async def send_media(self, message: AgentV1MediaMessage) -> None:
- """Send binary audio data to the agent."""
- await self._send(message)
+ async def _send_model(self, data: typing.Any) -> None:
+ """
+ Send a Pydantic model to the websocket connection.
+ """
+ await self._send(data.dict())
class V1SocketClient(EventEmitterMixin):
@@ -188,111 +168,105 @@ def __init__(self, *, websocket: websockets_sync_connection.Connection):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is for audio chunks)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
def __iter__(self):
for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V1SocketClientResponse, json.loads(message)) # type: ignore
def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages for Agent conversations.
Emits events in the following order:
- EventType.OPEN when connection is established
- - EventType.MESSAGE for each message received (binary audio or JSON events)
+ - EventType.MESSAGE for each message received
- EventType.ERROR if an error occurs
- EventType.CLOSE when connection is closed
"""
self._emit(EventType.OPEN, None)
try:
for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
self._emit(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- self._emit(EventType.ERROR, exc)
+ self._emit(EventType.ERROR, exc)
finally:
self._emit(EventType.CLOSE, None)
- def recv(self) -> V1SocketClientResponse:
+ def send_agent_v_1_settings(self, message: AgentV1Settings) -> None:
"""
- Receive a message from the websocket connection.
- Handles both binary and JSON messages.
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1Settings.
"""
- data = self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ self._send_model(message)
- def _send(self, data: typing.Any) -> None:
+ def send_agent_v_1_update_speak(self, message: AgentV1UpdateSpeak) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1UpdateSpeak.
"""
- if isinstance(data, (bytes, bytearray)):
- self._websocket.send(data)
- elif isinstance(data, dict):
- self._websocket.send(json.dumps(data))
- else:
- self._websocket.send(data)
+ self._send_model(message)
- def _send_model(self, data: typing.Any) -> None:
+ def send_agent_v_1_inject_user_message(self, message: AgentV1InjectUserMessage) -> None:
"""
- Send a Pydantic model to the websocket connection.
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1InjectUserMessage.
"""
- self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- def send_settings(self, message: AgentV1SettingsMessage) -> None:
- """Send initial agent configuration settings."""
self._send_model(message)
- def send_control(self, message: AgentV1ControlMessage) -> None:
- """Send a control message (keep_alive, etc.)."""
+ def send_agent_v_1_inject_agent_message(self, message: AgentV1InjectAgentMessage) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1InjectAgentMessage.
+ """
self._send_model(message)
- def send_update_speak(self, message: AgentV1UpdateSpeakMessage) -> None:
- """Update the agent's speech synthesis settings."""
+ def send_agent_v_1_send_function_call_response(self, message: AgentV1SendFunctionCallResponse) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1SendFunctionCallResponse.
+ """
self._send_model(message)
- def send_update_prompt(self, message: AgentV1UpdatePromptMessage) -> None:
- """Update the agent's system prompt."""
+ def send_agent_v_1_keep_alive(self, message: AgentV1KeepAlive) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1KeepAlive.
+ """
self._send_model(message)
- def send_inject_user_message(self, message: AgentV1InjectUserMessageMessage) -> None:
- """Inject a user message into the conversation."""
+ def send_agent_v_1_update_prompt(self, message: AgentV1UpdatePrompt) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as an AgentV1UpdatePrompt.
+ """
self._send_model(message)
- def send_inject_agent_message(self, message: AgentV1InjectAgentMessageMessage) -> None:
- """Inject an agent message into the conversation."""
+ def send_agent_v_1_media(self, message: str) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a str.
+ """
self._send_model(message)
- def send_function_call_response(self, message: AgentV1FunctionCallResponseMessage) -> None:
- """Send the result of a function call back to the agent."""
- self._send_model(message)
+ def recv(self) -> V1SocketClientResponse:
+ """
+ Receive a message from the websocket connection.
+ """
+ data = self._websocket.recv()
+ json_data = json.loads(data)
+ return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
+
+ def _send(self, data: typing.Any) -> None:
+ """
+ Send a message to the websocket connection.
+ """
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ self._websocket.send(data)
- def send_media(self, message: AgentV1MediaMessage) -> None:
- """Send binary audio data to the agent."""
- self._send(message)
+ def _send_model(self, data: typing.Any) -> None:
+ """
+ Send a Pydantic model to the websocket connection.
+ """
+ self._send(data.dict())
diff --git a/src/deepgram/agent/v1/types/__init__.py b/src/deepgram/agent/v1/types/__init__.py
new file mode 100644
index 00000000..9007d199
--- /dev/null
+++ b/src/deepgram/agent/v1/types/__init__.py
@@ -0,0 +1,482 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .agent_v1agent_audio_done import AgentV1AgentAudioDone
+ from .agent_v1agent_started_speaking import AgentV1AgentStartedSpeaking
+ from .agent_v1agent_thinking import AgentV1AgentThinking
+ from .agent_v1conversation_text import AgentV1ConversationText
+ from .agent_v1conversation_text_role import AgentV1ConversationTextRole
+ from .agent_v1error import AgentV1Error
+ from .agent_v1function_call_request import AgentV1FunctionCallRequest
+ from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItem
+ from .agent_v1inject_agent_message import AgentV1InjectAgentMessage
+ from .agent_v1inject_user_message import AgentV1InjectUserMessage
+ from .agent_v1injection_refused import AgentV1InjectionRefused
+ from .agent_v1keep_alive import AgentV1KeepAlive
+ from .agent_v1prompt_updated import AgentV1PromptUpdated
+ from .agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponse
+ from .agent_v1send_function_call_response import AgentV1SendFunctionCallResponse
+ from .agent_v1settings import AgentV1Settings
+ from .agent_v1settings_agent import AgentV1SettingsAgent
+ from .agent_v1settings_agent_context import AgentV1SettingsAgentContext
+ from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItem
+ from .agent_v1settings_agent_context_messages_item_content import AgentV1SettingsAgentContextMessagesItemContent
+ from .agent_v1settings_agent_context_messages_item_content_role import (
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+ )
+ from .agent_v1settings_agent_listen import AgentV1SettingsAgentListen
+ from .agent_v1settings_agent_listen_provider import AgentV1SettingsAgentListenProvider
+ from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeak
+ from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint
+ from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpoint
+ from .agent_v1settings_agent_speak_endpoint_provider import (
+ AgentV1SettingsAgentSpeakEndpointProvider,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPolly,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesia,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_deepgram import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgram,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabs,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_open_ai import AgentV1SettingsAgentSpeakEndpointProviderOpenAi
+ from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+ )
+ from .agent_v1settings_agent_speak_item import AgentV1SettingsAgentSpeakItem
+ from .agent_v1settings_agent_speak_item_endpoint import AgentV1SettingsAgentSpeakItemEndpoint
+ from .agent_v1settings_agent_speak_item_provider import (
+ AgentV1SettingsAgentSpeakItemProvider,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAi,
+ )
+ from .agent_v1settings_agent_speak_item_provider_aws_polly import AgentV1SettingsAgentSpeakItemProviderAwsPolly
+ from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials,
+ )
+ from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType,
+ )
+ from .agent_v1settings_agent_speak_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+ )
+ from .agent_v1settings_agent_speak_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+ )
+ from .agent_v1settings_agent_speak_item_provider_cartesia import AgentV1SettingsAgentSpeakItemProviderCartesia
+ from .agent_v1settings_agent_speak_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+ )
+ from .agent_v1settings_agent_speak_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoice,
+ )
+ from .agent_v1settings_agent_speak_item_provider_deepgram import AgentV1SettingsAgentSpeakItemProviderDeepgram
+ from .agent_v1settings_agent_speak_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+ )
+ from .agent_v1settings_agent_speak_item_provider_eleven_labs import AgentV1SettingsAgentSpeakItemProviderElevenLabs
+ from .agent_v1settings_agent_speak_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+ )
+ from .agent_v1settings_agent_speak_item_provider_open_ai import AgentV1SettingsAgentSpeakItemProviderOpenAi
+ from .agent_v1settings_agent_speak_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakItemProviderOpenAiModel,
+ )
+ from .agent_v1settings_agent_speak_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakItemProviderOpenAiVoice,
+ )
+ from .agent_v1settings_agent_think import AgentV1SettingsAgentThink
+ from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLength
+ from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpoint
+ from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItem
+ from .agent_v1settings_agent_think_functions_item_endpoint import AgentV1SettingsAgentThinkFunctionsItemEndpoint
+ from .agent_v1settings_agent_think_provider import AgentV1SettingsAgentThinkProvider
+ from .agent_v1settings_agent_think_provider_credentials import AgentV1SettingsAgentThinkProviderCredentials
+ from .agent_v1settings_agent_think_provider_credentials_credentials import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentials,
+ )
+ from .agent_v1settings_agent_think_provider_credentials_credentials_type import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsType,
+ )
+ from .agent_v1settings_agent_think_provider_credentials_model import (
+ AgentV1SettingsAgentThinkProviderCredentialsModel,
+ )
+ from .agent_v1settings_agent_think_provider_model import AgentV1SettingsAgentThinkProviderModel
+ from .agent_v1settings_agent_think_provider_three import AgentV1SettingsAgentThinkProviderThree
+ from .agent_v1settings_agent_think_provider_three_model import AgentV1SettingsAgentThinkProviderThreeModel
+ from .agent_v1settings_agent_think_provider_two import AgentV1SettingsAgentThinkProviderTwo
+ from .agent_v1settings_agent_think_provider_two_model import AgentV1SettingsAgentThinkProviderTwoModel
+ from .agent_v1settings_agent_think_provider_zero import AgentV1SettingsAgentThinkProviderZero
+ from .agent_v1settings_agent_think_provider_zero_model import AgentV1SettingsAgentThinkProviderZeroModel
+ from .agent_v1settings_applied import AgentV1SettingsApplied
+ from .agent_v1settings_audio import AgentV1SettingsAudio
+ from .agent_v1settings_audio_input import AgentV1SettingsAudioInput
+ from .agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding
+ from .agent_v1settings_audio_output import AgentV1SettingsAudioOutput
+ from .agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding
+ from .agent_v1settings_flags import AgentV1SettingsFlags
+ from .agent_v1speak_updated import AgentV1SpeakUpdated
+ from .agent_v1update_prompt import AgentV1UpdatePrompt
+ from .agent_v1update_speak import AgentV1UpdateSpeak
+ from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeak
+ from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpoint
+ from .agent_v1update_speak_speak_provider import (
+ AgentV1UpdateSpeakSpeakProvider,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly import AgentV1UpdateSpeakSpeakProviderAwsPolly
+ from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly_credentials_type import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ from .agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ from .agent_v1update_speak_speak_provider_cartesia import AgentV1UpdateSpeakSpeakProviderCartesia
+ from .agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+ from .agent_v1update_speak_speak_provider_deepgram import AgentV1UpdateSpeakSpeakProviderDeepgram
+ from .agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+ from .agent_v1update_speak_speak_provider_eleven_labs import AgentV1UpdateSpeakSpeakProviderElevenLabs
+ from .agent_v1update_speak_speak_provider_eleven_labs_model_id import (
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+ )
+ from .agent_v1update_speak_speak_provider_open_ai import AgentV1UpdateSpeakSpeakProviderOpenAi
+ from .agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ from .agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+ from .agent_v1user_started_speaking import AgentV1UserStartedSpeaking
+ from .agent_v1warning import AgentV1Warning
+ from .agent_v1welcome import AgentV1Welcome
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDone": ".agent_v1agent_audio_done",
+ "AgentV1AgentStartedSpeaking": ".agent_v1agent_started_speaking",
+ "AgentV1AgentThinking": ".agent_v1agent_thinking",
+ "AgentV1ConversationText": ".agent_v1conversation_text",
+ "AgentV1ConversationTextRole": ".agent_v1conversation_text_role",
+ "AgentV1Error": ".agent_v1error",
+ "AgentV1FunctionCallRequest": ".agent_v1function_call_request",
+ "AgentV1FunctionCallRequestFunctionsItem": ".agent_v1function_call_request_functions_item",
+ "AgentV1InjectAgentMessage": ".agent_v1inject_agent_message",
+ "AgentV1InjectUserMessage": ".agent_v1inject_user_message",
+ "AgentV1InjectionRefused": ".agent_v1injection_refused",
+ "AgentV1KeepAlive": ".agent_v1keep_alive",
+ "AgentV1PromptUpdated": ".agent_v1prompt_updated",
+ "AgentV1ReceiveFunctionCallResponse": ".agent_v1receive_function_call_response",
+ "AgentV1SendFunctionCallResponse": ".agent_v1send_function_call_response",
+ "AgentV1Settings": ".agent_v1settings",
+ "AgentV1SettingsAgent": ".agent_v1settings_agent",
+ "AgentV1SettingsAgentContext": ".agent_v1settings_agent_context",
+ "AgentV1SettingsAgentContextMessagesItem": ".agent_v1settings_agent_context_messages_item",
+ "AgentV1SettingsAgentContextMessagesItemContent": ".agent_v1settings_agent_context_messages_item_content",
+ "AgentV1SettingsAgentContextMessagesItemContentRole": ".agent_v1settings_agent_context_messages_item_content_role",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".agent_v1settings_agent_context_messages_item_function_calls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".agent_v1settings_agent_context_messages_item_function_calls_function_calls_item",
+ "AgentV1SettingsAgentListen": ".agent_v1settings_agent_listen",
+ "AgentV1SettingsAgentListenProvider": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentSpeak": ".agent_v1settings_agent_speak",
+ "AgentV1SettingsAgentSpeakEndpoint": ".agent_v1settings_agent_speak_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint": ".agent_v1settings_agent_speak_endpoint_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointProvider": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesia": ".agent_v1settings_agent_speak_endpoint_provider_cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgram": ".agent_v1settings_agent_speak_endpoint_provider_deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".agent_v1settings_agent_speak_endpoint_provider_deepgram_model",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabs": ".agent_v1settings_agent_speak_endpoint_provider_eleven_labs",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAi": ".agent_v1settings_agent_speak_endpoint_provider_open_ai",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_model",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_voice",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakItem": ".agent_v1settings_agent_speak_item",
+ "AgentV1SettingsAgentSpeakItemEndpoint": ".agent_v1settings_agent_speak_item_endpoint",
+ "AgentV1SettingsAgentSpeakItemProvider": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPolly": ".agent_v1settings_agent_speak_item_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials": ".agent_v1settings_agent_speak_item_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType": ".agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine": ".agent_v1settings_agent_speak_item_provider_aws_polly_engine",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice": ".agent_v1settings_agent_speak_item_provider_aws_polly_voice",
+ "AgentV1SettingsAgentSpeakItemProviderCartesia": ".agent_v1settings_agent_speak_item_provider_cartesia",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaModelId": ".agent_v1settings_agent_speak_item_provider_cartesia_model_id",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoice": ".agent_v1settings_agent_speak_item_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgram": ".agent_v1settings_agent_speak_item_provider_deepgram",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramModel": ".agent_v1settings_agent_speak_item_provider_deepgram_model",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabs": ".agent_v1settings_agent_speak_item_provider_eleven_labs",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId": ".agent_v1settings_agent_speak_item_provider_eleven_labs_model_id",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAi": ".agent_v1settings_agent_speak_item_provider_open_ai",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiModel": ".agent_v1settings_agent_speak_item_provider_open_ai_model",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiVoice": ".agent_v1settings_agent_speak_item_provider_open_ai_voice",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPolly": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_Cartesia": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_Deepgram": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabs": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAi": ".agent_v1settings_agent_speak_item_provider",
+ "AgentV1SettingsAgentThink": ".agent_v1settings_agent_think",
+ "AgentV1SettingsAgentThinkContextLength": ".agent_v1settings_agent_think_context_length",
+ "AgentV1SettingsAgentThinkEndpoint": ".agent_v1settings_agent_think_endpoint",
+ "AgentV1SettingsAgentThinkFunctionsItem": ".agent_v1settings_agent_think_functions_item",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint": ".agent_v1settings_agent_think_functions_item_endpoint",
+ "AgentV1SettingsAgentThinkProvider": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProviderCredentials": ".agent_v1settings_agent_think_provider_credentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentials": ".agent_v1settings_agent_think_provider_credentials_credentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsType": ".agent_v1settings_agent_think_provider_credentials_credentials_type",
+ "AgentV1SettingsAgentThinkProviderCredentialsModel": ".agent_v1settings_agent_think_provider_credentials_model",
+ "AgentV1SettingsAgentThinkProviderModel": ".agent_v1settings_agent_think_provider_model",
+ "AgentV1SettingsAgentThinkProviderThree": ".agent_v1settings_agent_think_provider_three",
+ "AgentV1SettingsAgentThinkProviderThreeModel": ".agent_v1settings_agent_think_provider_three_model",
+ "AgentV1SettingsAgentThinkProviderTwo": ".agent_v1settings_agent_think_provider_two",
+ "AgentV1SettingsAgentThinkProviderTwoModel": ".agent_v1settings_agent_think_provider_two_model",
+ "AgentV1SettingsAgentThinkProviderZero": ".agent_v1settings_agent_think_provider_zero",
+ "AgentV1SettingsAgentThinkProviderZeroModel": ".agent_v1settings_agent_think_provider_zero_model",
+ "AgentV1SettingsApplied": ".agent_v1settings_applied",
+ "AgentV1SettingsAudio": ".agent_v1settings_audio",
+ "AgentV1SettingsAudioInput": ".agent_v1settings_audio_input",
+ "AgentV1SettingsAudioInputEncoding": ".agent_v1settings_audio_input_encoding",
+ "AgentV1SettingsAudioOutput": ".agent_v1settings_audio_output",
+ "AgentV1SettingsAudioOutputEncoding": ".agent_v1settings_audio_output_encoding",
+ "AgentV1SettingsFlags": ".agent_v1settings_flags",
+ "AgentV1SpeakUpdated": ".agent_v1speak_updated",
+ "AgentV1UpdatePrompt": ".agent_v1update_prompt",
+ "AgentV1UpdateSpeak": ".agent_v1update_speak",
+ "AgentV1UpdateSpeakSpeak": ".agent_v1update_speak_speak",
+ "AgentV1UpdateSpeakSpeakEndpoint": ".agent_v1update_speak_speak_endpoint",
+ "AgentV1UpdateSpeakSpeakProvider": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly": ".agent_v1update_speak_speak_provider_aws_polly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials": ".agent_v1update_speak_speak_provider_aws_polly_credentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType": ".agent_v1update_speak_speak_provider_aws_polly_credentials_type",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine": ".agent_v1update_speak_speak_provider_aws_polly_engine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice": ".agent_v1update_speak_speak_provider_aws_polly_voice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia": ".agent_v1update_speak_speak_provider_cartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId": ".agent_v1update_speak_speak_provider_cartesia_model_id",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice": ".agent_v1update_speak_speak_provider_cartesia_voice",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram": ".agent_v1update_speak_speak_provider_deepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel": ".agent_v1update_speak_speak_provider_deepgram_model",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs": ".agent_v1update_speak_speak_provider_eleven_labs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId": ".agent_v1update_speak_speak_provider_eleven_labs_model_id",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi": ".agent_v1update_speak_speak_provider_open_ai",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel": ".agent_v1update_speak_speak_provider_open_ai_model",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice": ".agent_v1update_speak_speak_provider_open_ai_voice",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi": ".agent_v1update_speak_speak_provider",
+ "AgentV1UserStartedSpeaking": ".agent_v1user_started_speaking",
+ "AgentV1Warning": ".agent_v1warning",
+ "AgentV1Welcome": ".agent_v1welcome",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "AgentV1AgentAudioDone",
+ "AgentV1AgentStartedSpeaking",
+ "AgentV1AgentThinking",
+ "AgentV1ConversationText",
+ "AgentV1ConversationTextRole",
+ "AgentV1Error",
+ "AgentV1FunctionCallRequest",
+ "AgentV1FunctionCallRequestFunctionsItem",
+ "AgentV1InjectAgentMessage",
+ "AgentV1InjectUserMessage",
+ "AgentV1InjectionRefused",
+ "AgentV1KeepAlive",
+ "AgentV1PromptUpdated",
+ "AgentV1ReceiveFunctionCallResponse",
+ "AgentV1SendFunctionCallResponse",
+ "AgentV1Settings",
+ "AgentV1SettingsAgent",
+ "AgentV1SettingsAgentContext",
+ "AgentV1SettingsAgentContextMessagesItem",
+ "AgentV1SettingsAgentContextMessagesItemContent",
+ "AgentV1SettingsAgentContextMessagesItemContentRole",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem",
+ "AgentV1SettingsAgentListen",
+ "AgentV1SettingsAgentListenProvider",
+ "AgentV1SettingsAgentSpeak",
+ "AgentV1SettingsAgentSpeakEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointProvider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesia",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgram",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakItem",
+ "AgentV1SettingsAgentSpeakItemEndpoint",
+ "AgentV1SettingsAgentSpeakItemProvider",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakItemProviderCartesia",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakItemProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgram",
+ "AgentV1SettingsAgentSpeakItemProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabs",
+ "AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAi",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakItemProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakItemProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakItemProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakItemProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakItemProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakItemProvider_OpenAi",
+ "AgentV1SettingsAgentThink",
+ "AgentV1SettingsAgentThinkContextLength",
+ "AgentV1SettingsAgentThinkEndpoint",
+ "AgentV1SettingsAgentThinkFunctionsItem",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint",
+ "AgentV1SettingsAgentThinkProvider",
+ "AgentV1SettingsAgentThinkProviderCredentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentials",
+ "AgentV1SettingsAgentThinkProviderCredentialsCredentialsType",
+ "AgentV1SettingsAgentThinkProviderCredentialsModel",
+ "AgentV1SettingsAgentThinkProviderModel",
+ "AgentV1SettingsAgentThinkProviderThree",
+ "AgentV1SettingsAgentThinkProviderThreeModel",
+ "AgentV1SettingsAgentThinkProviderTwo",
+ "AgentV1SettingsAgentThinkProviderTwoModel",
+ "AgentV1SettingsAgentThinkProviderZero",
+ "AgentV1SettingsAgentThinkProviderZeroModel",
+ "AgentV1SettingsApplied",
+ "AgentV1SettingsAudio",
+ "AgentV1SettingsAudioInput",
+ "AgentV1SettingsAudioInputEncoding",
+ "AgentV1SettingsAudioOutput",
+ "AgentV1SettingsAudioOutputEncoding",
+ "AgentV1SettingsFlags",
+ "AgentV1SpeakUpdated",
+ "AgentV1UpdatePrompt",
+ "AgentV1UpdateSpeak",
+ "AgentV1UpdateSpeakSpeak",
+ "AgentV1UpdateSpeakSpeakEndpoint",
+ "AgentV1UpdateSpeakSpeakProvider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi",
+ "AgentV1UserStartedSpeaking",
+ "AgentV1Warning",
+ "AgentV1Welcome",
+]
diff --git a/src/deepgram/extensions/types/sockets/speak_v1_text_message.py b/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py
similarity index 59%
rename from src/deepgram/extensions/types/sockets/speak_v1_text_message.py
rename to src/deepgram/agent/v1/types/agent_v1agent_audio_done.py
index a6d49bfa..95f3f376 100644
--- a/src/deepgram/extensions/types/sockets/speak_v1_text_message.py
+++ b/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py
@@ -1,4 +1,4 @@
-# Speak V1 Text Message - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,20 +6,16 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class SpeakV1TextMessage(UniversalBaseModel):
+class AgentV1AgentAudioDone(UniversalBaseModel):
+ type: typing.Literal["AgentAudioDone"] = pydantic.Field(default="AgentAudioDone")
"""
- Request to convert text to speech
+ Message type identifier indicating the agent has finished sending audio
"""
-
- type: typing.Literal["Speak"]
- """Message type identifier"""
-
- text: str
- """The input text to be converted to speech"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py
new file mode 100644
index 00000000..e6c47c6f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1AgentStartedSpeaking(UniversalBaseModel):
+ type: typing.Literal["AgentStartedSpeaking"] = pydantic.Field(default="AgentStartedSpeaking")
+ """
+ Message type identifier for agent started speaking
+ """
+
+ total_latency: float = pydantic.Field()
+ """
+ Seconds from receiving the user's utterance to producing the agent's reply
+ """
+
+ tts_latency: float = pydantic.Field()
+ """
+ The portion of total latency attributable to text-to-speech
+ """
+
+ ttt_latency: float = pydantic.Field()
+ """
+ The portion of total latency attributable to text-to-text (usually an LLM)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1agent_thinking.py b/src/deepgram/agent/v1/types/agent_v1agent_thinking.py
new file mode 100644
index 00000000..4b63c92f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1agent_thinking.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1AgentThinking(UniversalBaseModel):
+ type: typing.Literal["AgentThinking"] = pydantic.Field(default="AgentThinking")
+ """
+ Message type identifier for agent thinking
+ """
+
+ content: str = pydantic.Field()
+ """
+ The text of the agent's thought process
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1conversation_text.py b/src/deepgram/agent/v1/types/agent_v1conversation_text.py
new file mode 100644
index 00000000..9888c93b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1conversation_text.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1conversation_text_role import AgentV1ConversationTextRole
+
+
+class AgentV1ConversationText(UniversalBaseModel):
+ type: typing.Literal["ConversationText"] = pydantic.Field(default="ConversationText")
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1ConversationTextRole = pydantic.Field()
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str = pydantic.Field()
+ """
+ The actual statement that was spoken
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1conversation_text_role.py b/src/deepgram/agent/v1/types/agent_v1conversation_text_role.py
new file mode 100644
index 00000000..785333dd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1conversation_text_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1ConversationTextRole = typing.Union[typing.Literal["user", "assistant"], typing.Any]
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_error_event.py b/src/deepgram/agent/v1/types/agent_v1error.py
similarity index 50%
rename from src/deepgram/extensions/types/sockets/agent_v1_error_event.py
rename to src/deepgram/agent/v1/types/agent_v1error.py
index 004151b7..e0c01466 100644
--- a/src/deepgram/extensions/types/sockets/agent_v1_error_event.py
+++ b/src/deepgram/agent/v1/types/agent_v1error.py
@@ -1,4 +1,4 @@
-# Agent V1 Error Event - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,23 +6,26 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class AgentV1ErrorEvent(UniversalBaseModel):
+class AgentV1Error(UniversalBaseModel):
+ type: typing.Literal["Error"] = pydantic.Field(default="Error")
"""
- Receive an error message from the server when an error occurs
+ Message type identifier for error responses
+ """
+
+ description: str = pydantic.Field()
+ """
+ A description of what went wrong
+ """
+
+ code: str = pydantic.Field()
+ """
+ Error code identifying the type of error
"""
-
- type: typing.Literal["Error"]
- """Message type identifier for error responses"""
-
- description: str
- """A description of what went wrong"""
-
- code: str
- """Error code identifying the type of error"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request.py b/src/deepgram/agent/v1/types/agent_v1function_call_request.py
new file mode 100644
index 00000000..4c5e7c4a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1function_call_request.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItem
+
+
+class AgentV1FunctionCallRequest(UniversalBaseModel):
+ type: typing.Literal["FunctionCallRequest"] = pydantic.Field(default="FunctionCallRequest")
+ """
+ Message type identifier for function call requests
+ """
+
+ functions: typing.List[AgentV1FunctionCallRequestFunctionsItem] = pydantic.Field()
+ """
+ Array of functions to be called
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py b/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py
new file mode 100644
index 00000000..dcd75dd4
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1FunctionCallRequestFunctionsItem(UniversalBaseModel):
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the function call
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the function to call
+ """
+
+ arguments: str = pydantic.Field()
+ """
+ JSON string containing the function arguments
+ """
+
+ client_side: bool = pydantic.Field()
+ """
+ Whether the function should be executed client-side
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/listen_v1_speech_started_event.py b/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py
similarity index 53%
rename from src/deepgram/extensions/types/sockets/listen_v1_speech_started_event.py
rename to src/deepgram/agent/v1/types/agent_v1inject_agent_message.py
index b9b4900a..6711f6dc 100644
--- a/src/deepgram/extensions/types/sockets/listen_v1_speech_started_event.py
+++ b/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py
@@ -1,4 +1,4 @@
-# Listen V1 Speech Started Event - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,23 +6,21 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class ListenV1SpeechStartedEvent(UniversalBaseModel):
+class AgentV1InjectAgentMessage(UniversalBaseModel):
+ type: typing.Literal["InjectAgentMessage"] = pydantic.Field(default="InjectAgentMessage")
"""
- vad_events is true and speech has been detected
+ Message type identifier for injecting an agent message
+ """
+
+ message: str = pydantic.Field()
+ """
+ The statement that the agent should say
"""
-
- type: typing.Literal["SpeechStarted"]
- """Message type identifier"""
-
- channel: typing.List[int]
- """The channel"""
-
- timestamp: float
- """The timestamp"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1inject_user_message.py b/src/deepgram/agent/v1/types/agent_v1inject_user_message.py
new file mode 100644
index 00000000..78a3ebf9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1inject_user_message.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1InjectUserMessage(UniversalBaseModel):
+ type: typing.Literal["InjectUserMessage"] = pydantic.Field(default="InjectUserMessage")
+ """
+ Message type identifier for injecting a user message
+ """
+
+ content: str = pydantic.Field()
+ """
+ The specific phrase or statement the agent should respond to
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1injection_refused.py b/src/deepgram/agent/v1/types/agent_v1injection_refused.py
new file mode 100644
index 00000000..b185fccc
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1injection_refused.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1InjectionRefused(UniversalBaseModel):
+ type: typing.Literal["InjectionRefused"] = pydantic.Field(default="InjectionRefused")
+ """
+ Message type identifier for injection refused
+ """
+
+ message: str = pydantic.Field()
+ """
+ Details about why the injection was refused
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_control_message.py b/src/deepgram/agent/v1/types/agent_v1keep_alive.py
similarity index 60%
rename from src/deepgram/extensions/types/sockets/agent_v1_control_message.py
rename to src/deepgram/agent/v1/types/agent_v1keep_alive.py
index 270e0e12..49266088 100644
--- a/src/deepgram/extensions/types/sockets/agent_v1_control_message.py
+++ b/src/deepgram/agent/v1/types/agent_v1keep_alive.py
@@ -1,4 +1,4 @@
-# Agent V1 Control Message - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,18 +6,21 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class AgentV1ControlMessage(UniversalBaseModel):
+class AgentV1KeepAlive(UniversalBaseModel):
"""
Send a control message to the agent
"""
-
- type: typing.Literal["KeepAlive"] = "KeepAlive"
- """Message type identifier"""
+
+ type: typing.Literal["KeepAlive"] = pydantic.Field(default="KeepAlive")
+ """
+ Message type identifier
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/speak_v1_control_message.py b/src/deepgram/agent/v1/types/agent_v1prompt_updated.py
similarity index 60%
rename from src/deepgram/extensions/types/sockets/speak_v1_control_message.py
rename to src/deepgram/agent/v1/types/agent_v1prompt_updated.py
index 9715792e..f4827a96 100644
--- a/src/deepgram/extensions/types/sockets/speak_v1_control_message.py
+++ b/src/deepgram/agent/v1/types/agent_v1prompt_updated.py
@@ -1,4 +1,4 @@
-# Speak V1 Control Message - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,17 +6,16 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class SpeakV1ControlMessage(UniversalBaseModel):
+class AgentV1PromptUpdated(UniversalBaseModel):
+ type: typing.Literal["PromptUpdated"] = pydantic.Field(default="PromptUpdated")
"""
- Control messages for managing the Text to Speech WebSocket connection
+ Message type identifier for prompt update confirmation
"""
-
- type: typing.Literal["Flush", "Clear", "Close"]
- """Message type identifier"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py
new file mode 100644
index 00000000..8bf4d3ce
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1ReceiveFunctionCallResponse(UniversalBaseModel):
+ """
+ Function call response message used bidirectionally:
+
+ • **Client → Server**: Response after client executes a function
+ marked as client_side: true
+ • **Server → Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"] = pydantic.Field(default="FunctionCallResponse")
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The unique identifier for the function call.
+
+ • **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ • **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the function being called
+ """
+
+ content: str = pydantic.Field()
+ """
+ The content or result of the function call
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py b/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py
new file mode 100644
index 00000000..d493cbaa
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SendFunctionCallResponse(UniversalBaseModel):
+ """
+ Function call response message used bidirectionally:
+
+ • **Client → Server**: Response after client executes a function
+ marked as client_side: true
+ • **Server → Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"] = pydantic.Field(default="FunctionCallResponse")
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The unique identifier for the function call.
+
+ • **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ • **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the function being called
+ """
+
+ content: str = pydantic.Field()
+ """
+ The content or result of the function call
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings.py b/src/deepgram/agent/v1/types/agent_v1settings.py
new file mode 100644
index 00000000..201533d3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent import AgentV1SettingsAgent
+from .agent_v1settings_audio import AgentV1SettingsAudio
+from .agent_v1settings_flags import AgentV1SettingsFlags
+
+
+class AgentV1Settings(UniversalBaseModel):
+ type: typing.Literal["Settings"] = "Settings"
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Tags to associate with the request
+ """
+
+ experimental: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ To enable experimental features
+ """
+
+ flags: typing.Optional[AgentV1SettingsFlags] = None
+ mip_opt_out: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ To opt out of Deepgram Model Improvement Program
+ """
+
+ audio: AgentV1SettingsAudio
+ agent: AgentV1SettingsAgent
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent.py b/src/deepgram/agent/v1/types/agent_v1settings_agent.py
new file mode 100644
index 00000000..3f4f6f81
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context import AgentV1SettingsAgentContext
+from .agent_v1settings_agent_listen import AgentV1SettingsAgentListen
+from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeak
+from .agent_v1settings_agent_think import AgentV1SettingsAgentThink
+
+
+class AgentV1SettingsAgent(UniversalBaseModel):
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Agent language
+ """
+
+ context: typing.Optional[AgentV1SettingsAgentContext] = pydantic.Field(default=None)
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ listen: typing.Optional[AgentV1SettingsAgentListen] = None
+ think: typing.Optional[AgentV1SettingsAgentThink] = None
+ speak: typing.Optional[AgentV1SettingsAgentSpeak] = None
+ greeting: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Optional message that agent will speak at the start
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py
new file mode 100644
index 00000000..635f68b5
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItem
+
+
+class AgentV1SettingsAgentContext(UniversalBaseModel):
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ messages: typing.Optional[typing.List[AgentV1SettingsAgentContextMessagesItem]] = pydantic.Field(default=None)
+ """
+ Conversation history as a list of messages and function calls
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py
new file mode 100644
index 00000000..2061fd2d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_context_messages_item_content import AgentV1SettingsAgentContextMessagesItemContent
+from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+)
+
+AgentV1SettingsAgentContextMessagesItem = typing.Union[
+ AgentV1SettingsAgentContextMessagesItemContent, AgentV1SettingsAgentContextMessagesItemFunctionCalls
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py
new file mode 100644
index 00000000..9bc207ab
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context_messages_item_content_role import (
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemContent(UniversalBaseModel):
+ """
+ Conversation text as part of the conversation history
+ """
+
+ type: typing.Literal["History"] = pydantic.Field(default="History")
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1SettingsAgentContextMessagesItemContentRole = pydantic.Field()
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str = pydantic.Field()
+ """
+ The actual statement that was spoken
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py
new file mode 100644
index 00000000..19a3bcc0
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentContextMessagesItemContentRole = typing.Union[typing.Literal["user", "assistant"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py
new file mode 100644
index 00000000..6759f8b3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCalls(UniversalBaseModel):
+ """
+ Client-side or server-side function call request and response as part of the conversation history
+ """
+
+ type: typing.Literal["History"] = "History"
+ function_calls: typing.List[AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem] = (
+ pydantic.Field()
+ )
+ """
+ List of function call objects
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
new file mode 100644
index 00000000..9fead900
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem(UniversalBaseModel):
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the function call
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the function called
+ """
+
+ client_side: bool = pydantic.Field()
+ """
+ Indicates if the call was client-side or server-side
+ """
+
+ arguments: str = pydantic.Field()
+ """
+ Arguments passed to the function
+ """
+
+ response: str = pydantic.Field()
+ """
+ Response from the function call
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen.py
new file mode 100644
index 00000000..22951f00
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_listen_provider import AgentV1SettingsAgentListenProvider
+
+
+class AgentV1SettingsAgentListen(UniversalBaseModel):
+ provider: typing.Optional[AgentV1SettingsAgentListenProvider] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py
new file mode 100644
index 00000000..07de7713
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentListenProvider(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram")
+ """
+ Provider type for speech-to-text
+ """
+
+ model: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Model to use for speech to text
+ """
+
+ keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Prompt key-term recognition (nova-3 'en' only)
+ """
+
+ smart_format: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Applies smart formatting to improve transcript readability (Deepgram providers only)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py
new file mode 100644
index 00000000..1599944d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint
+from .agent_v1settings_agent_speak_item import AgentV1SettingsAgentSpeakItem
+
+AgentV1SettingsAgentSpeak = typing.Union[AgentV1SettingsAgentSpeakEndpoint, typing.List[AgentV1SettingsAgentSpeakItem]]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py
new file mode 100644
index 00000000..e6647bee
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpoint
+from .agent_v1settings_agent_speak_endpoint_provider import AgentV1SettingsAgentSpeakEndpointProvider
+
+
+class AgentV1SettingsAgentSpeakEndpoint(UniversalBaseModel):
+ provider: AgentV1SettingsAgentSpeakEndpointProvider
+ endpoint: typing.Optional[AgentV1SettingsAgentSpeakEndpointEndpoint] = pydantic.Field(default=None)
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py
new file mode 100644
index 00000000..e4dfb433
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentSpeakEndpointEndpoint(UniversalBaseModel):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py
new file mode 100644
index 00000000..63f4da9f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py
@@ -0,0 +1,121 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_Deepgram(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = "deepgram"
+ model: AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs(UniversalBaseModel):
+ type: typing.Literal["eleven_labs"] = "eleven_labs"
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId
+ language_code: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_Cartesia(UniversalBaseModel):
+ type: typing.Literal["cartesia"] = "cartesia"
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly(UniversalBaseModel):
+ type: typing.Literal["aws_polly"] = "aws_polly"
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice
+ language_code: str
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1SettingsAgentSpeakEndpointProvider = typing.Union[
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
new file mode 100644
index 00000000..dcc0771d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPolly(UniversalBaseModel):
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice = pydantic.Field()
+ """
+ AWS Polly voice name
+ """
+
+ language_code: str = pydantic.Field()
+ """
+ Language code (e.g., "en-US")
+ """
+
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..3aef30ab
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials(UniversalBaseModel):
+ type: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Required for STS only
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py
new file mode 100644
index 00000000..515f0617
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType = typing.Union[
+ typing.Literal["sts", "iam"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py
new file mode 100644
index 00000000..2f182419
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine = typing.Union[
+ typing.Literal["generative", "long-form", "standard", "neural"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py
new file mode 100644
index 00000000..0079e7b3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice = typing.Union[
+ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia.py
new file mode 100644
index 00000000..a57d83f6
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderCartesia(UniversalBaseModel):
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId = pydantic.Field()
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Cartesia language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py
new file mode 100644
index 00000000..b81e30b3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId = typing.Union[
+ typing.Literal["sonic-2", "sonic-multilingual"], typing.Any
+]
diff --git a/src/deepgram/extensions/types/sockets/listen_v1_control_message.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
similarity index 59%
rename from src/deepgram/extensions/types/sockets/listen_v1_control_message.py
rename to src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
index 1e3ff4e0..52d23801 100644
--- a/src/deepgram/extensions/types/sockets/listen_v1_control_message.py
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
@@ -1,4 +1,4 @@
-# Listen V1 Control Message - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,17 +6,21 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class ListenV1ControlMessage(UniversalBaseModel):
+class AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice(UniversalBaseModel):
+ mode: str = pydantic.Field()
"""
- Control messages for managing the Speech to Text WebSocket connection
+ Cartesia voice mode
+ """
+
+ id: str = pydantic.Field()
+ """
+ Cartesia voice ID
"""
-
- type: typing.Literal["Finalize", "CloseStream", "KeepAlive"]
- """Message type identifier"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram.py
new file mode 100644
index 00000000..c77c94b1
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderDeepgram(UniversalBaseModel):
+ model: AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel = pydantic.Field()
+ """
+ Deepgram TTS model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py
new file mode 100644
index 00000000..161119d8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel = typing.Union[
+ typing.Literal[
+ "aura-asteria-en",
+ "aura-luna-en",
+ "aura-stella-en",
+ "aura-athena-en",
+ "aura-hera-en",
+ "aura-orion-en",
+ "aura-arcas-en",
+ "aura-perseus-en",
+ "aura-angus-en",
+ "aura-orpheus-en",
+ "aura-helios-en",
+ "aura-zeus-en",
+ "aura-2-amalthea-en",
+ "aura-2-andromeda-en",
+ "aura-2-apollo-en",
+ "aura-2-arcas-en",
+ "aura-2-aries-en",
+ "aura-2-asteria-en",
+ "aura-2-athena-en",
+ "aura-2-atlas-en",
+ "aura-2-aurora-en",
+ "aura-2-callista-en",
+ "aura-2-cora-en",
+ "aura-2-cordelia-en",
+ "aura-2-delia-en",
+ "aura-2-draco-en",
+ "aura-2-electra-en",
+ "aura-2-harmonia-en",
+ "aura-2-helena-en",
+ "aura-2-hera-en",
+ "aura-2-hermes-en",
+ "aura-2-hyperion-en",
+ "aura-2-iris-en",
+ "aura-2-janus-en",
+ "aura-2-juno-en",
+ "aura-2-jupiter-en",
+ "aura-2-luna-en",
+ "aura-2-mars-en",
+ "aura-2-minerva-en",
+ "aura-2-neptune-en",
+ "aura-2-odysseus-en",
+ "aura-2-ophelia-en",
+ "aura-2-orion-en",
+ "aura-2-orpheus-en",
+ "aura-2-pandora-en",
+ "aura-2-phoebe-en",
+ "aura-2-pluto-en",
+ "aura-2-saturn-en",
+ "aura-2-selene-en",
+ "aura-2-thalia-en",
+ "aura-2-theia-en",
+ "aura-2-vesta-en",
+ "aura-2-zeus-en",
+ "aura-2-sirio-es",
+ "aura-2-nestor-es",
+ "aura-2-carina-es",
+ "aura-2-celeste-es",
+ "aura-2-alvaro-es",
+ "aura-2-diana-es",
+ "aura-2-aquila-es",
+ "aura-2-selena-es",
+ "aura-2-estrella-es",
+ "aura-2-javier-es",
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs.py
new file mode 100644
index 00000000..5e4c8f6b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderElevenLabs(UniversalBaseModel):
+ model_id: AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId = pydantic.Field()
+ """
+ Eleven Labs model ID
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Eleven Labs optional language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py
new file mode 100644
index 00000000..4ed8c7e8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId = typing.Union[
+ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py
new file mode 100644
index 00000000..d2da0b7c
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderOpenAi(UniversalBaseModel):
+ model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel = pydantic.Field()
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice = pydantic.Field()
+ """
+ OpenAI voice
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py
new file mode 100644
index 00000000..f83a1943
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py
new file mode 100644
index 00000000..0e8a10eb
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item.py
new file mode 100644
index 00000000..632e3fae
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_endpoint import AgentV1SettingsAgentSpeakItemEndpoint
+from .agent_v1settings_agent_speak_item_provider import AgentV1SettingsAgentSpeakItemProvider
+
+
+class AgentV1SettingsAgentSpeakItem(UniversalBaseModel):
+ provider: AgentV1SettingsAgentSpeakItemProvider
+ endpoint: typing.Optional[AgentV1SettingsAgentSpeakItemEndpoint] = pydantic.Field(default=None)
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_endpoint.py
new file mode 100644
index 00000000..20661736
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_endpoint.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentSpeakItemEndpoint(UniversalBaseModel):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider.py
new file mode 100644
index 00000000..872a1027
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider.py
@@ -0,0 +1,117 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoice,
+)
+from .agent_v1settings_agent_speak_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+)
+from .agent_v1settings_agent_speak_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+)
+from .agent_v1settings_agent_speak_item_provider_open_ai_model import AgentV1SettingsAgentSpeakItemProviderOpenAiModel
+from .agent_v1settings_agent_speak_item_provider_open_ai_voice import AgentV1SettingsAgentSpeakItemProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentSpeakItemProvider_Deepgram(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = "deepgram"
+ model: AgentV1SettingsAgentSpeakItemProviderDeepgramModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakItemProvider_ElevenLabs(UniversalBaseModel):
+ type: typing.Literal["eleven_labs"] = "eleven_labs"
+ model_id: AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId
+ language_code: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakItemProvider_Cartesia(UniversalBaseModel):
+ type: typing.Literal["cartesia"] = "cartesia"
+ model_id: AgentV1SettingsAgentSpeakItemProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakItemProviderCartesiaVoice
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakItemProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ model: AgentV1SettingsAgentSpeakItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakItemProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakItemProvider_AwsPolly(UniversalBaseModel):
+ type: typing.Literal["aws_polly"] = "aws_polly"
+ voice: AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice
+ language_code: str
+ engine: AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1SettingsAgentSpeakItemProvider = typing.Union[
+ AgentV1SettingsAgentSpeakItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakItemProvider_OpenAi,
+ AgentV1SettingsAgentSpeakItemProvider_AwsPolly,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly.py
new file mode 100644
index 00000000..fe39ac45
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderAwsPolly(UniversalBaseModel):
+ voice: AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice = pydantic.Field()
+ """
+ AWS Polly voice name
+ """
+
+ language_code: str = pydantic.Field()
+ """
+ Language code (e.g., "en-US")
+ """
+
+ engine: AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..856d8463
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentials(UniversalBaseModel):
+ type: AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Required for STS only
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type.py
new file mode 100644
index 00000000..648a685c
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_credentials_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_engine.py
new file mode 100644
index 00000000..5d67c9f3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_engine.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderAwsPollyEngine = typing.Union[
+ typing.Literal["generative", "long-form", "standard", "neural"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_voice.py
new file mode 100644
index 00000000..d475903a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_aws_polly_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderAwsPollyVoice = typing.Union[
+ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia.py
new file mode 100644
index 00000000..ee1e1022
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakItemProviderCartesiaVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderCartesia(UniversalBaseModel):
+ model_id: AgentV1SettingsAgentSpeakItemProviderCartesiaModelId = pydantic.Field()
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1SettingsAgentSpeakItemProviderCartesiaVoice
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Cartesia language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia_model_id.py
new file mode 100644
index 00000000..198b4533
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderCartesiaModelId = typing.Union[
+ typing.Literal["sonic-2", "sonic-multilingual"], typing.Any
+]
diff --git a/src/deepgram/extensions/types/sockets/speak_v1_control_event.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia_voice.py
similarity index 57%
rename from src/deepgram/extensions/types/sockets/speak_v1_control_event.py
rename to src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia_voice.py
index 65732cc5..69a92e8e 100644
--- a/src/deepgram/extensions/types/sockets/speak_v1_control_event.py
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_cartesia_voice.py
@@ -1,4 +1,4 @@
-# Speak V1 Control Event - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
@@ -6,20 +6,21 @@
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class SpeakV1ControlEvent(UniversalBaseModel):
+class AgentV1SettingsAgentSpeakItemProviderCartesiaVoice(UniversalBaseModel):
+ mode: str = pydantic.Field()
"""
- Control event responses (Flushed, Cleared)
+ Cartesia voice mode
+ """
+
+ id: str = pydantic.Field()
+ """
+ Cartesia voice ID
"""
-
- type: typing.Literal["Flushed", "Cleared"]
- """Message type identifier"""
-
- sequence_id: int
- """The sequence ID of the response"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_deepgram.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_deepgram.py
new file mode 100644
index 00000000..0775d562
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_deepgram.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakItemProviderDeepgramModel,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderDeepgram(UniversalBaseModel):
+ model: AgentV1SettingsAgentSpeakItemProviderDeepgramModel = pydantic.Field()
+ """
+ Deepgram TTS model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_deepgram_model.py
new file mode 100644
index 00000000..ed69d46f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_deepgram_model.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderDeepgramModel = typing.Union[
+ typing.Literal[
+ "aura-asteria-en",
+ "aura-luna-en",
+ "aura-stella-en",
+ "aura-athena-en",
+ "aura-hera-en",
+ "aura-orion-en",
+ "aura-arcas-en",
+ "aura-perseus-en",
+ "aura-angus-en",
+ "aura-orpheus-en",
+ "aura-helios-en",
+ "aura-zeus-en",
+ "aura-2-amalthea-en",
+ "aura-2-andromeda-en",
+ "aura-2-apollo-en",
+ "aura-2-arcas-en",
+ "aura-2-aries-en",
+ "aura-2-asteria-en",
+ "aura-2-athena-en",
+ "aura-2-atlas-en",
+ "aura-2-aurora-en",
+ "aura-2-callista-en",
+ "aura-2-cora-en",
+ "aura-2-cordelia-en",
+ "aura-2-delia-en",
+ "aura-2-draco-en",
+ "aura-2-electra-en",
+ "aura-2-harmonia-en",
+ "aura-2-helena-en",
+ "aura-2-hera-en",
+ "aura-2-hermes-en",
+ "aura-2-hyperion-en",
+ "aura-2-iris-en",
+ "aura-2-janus-en",
+ "aura-2-juno-en",
+ "aura-2-jupiter-en",
+ "aura-2-luna-en",
+ "aura-2-mars-en",
+ "aura-2-minerva-en",
+ "aura-2-neptune-en",
+ "aura-2-odysseus-en",
+ "aura-2-ophelia-en",
+ "aura-2-orion-en",
+ "aura-2-orpheus-en",
+ "aura-2-pandora-en",
+ "aura-2-phoebe-en",
+ "aura-2-pluto-en",
+ "aura-2-saturn-en",
+ "aura-2-selene-en",
+ "aura-2-thalia-en",
+ "aura-2-theia-en",
+ "aura-2-vesta-en",
+ "aura-2-zeus-en",
+ "aura-2-sirio-es",
+ "aura-2-nestor-es",
+ "aura-2-carina-es",
+ "aura-2-celeste-es",
+ "aura-2-alvaro-es",
+ "aura-2-diana-es",
+ "aura-2-aquila-es",
+ "aura-2-selena-es",
+ "aura-2-estrella-es",
+ "aura-2-javier-es",
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_eleven_labs.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_eleven_labs.py
new file mode 100644
index 00000000..eef39c34
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_eleven_labs.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId,
+)
+
+
+class AgentV1SettingsAgentSpeakItemProviderElevenLabs(UniversalBaseModel):
+ model_id: AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId = pydantic.Field()
+ """
+ Eleven Labs model ID
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Eleven Labs optional language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_eleven_labs_model_id.py
new file mode 100644
index 00000000..cc33cedf
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_eleven_labs_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderElevenLabsModelId = typing.Union[
+ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai.py
new file mode 100644
index 00000000..fc8b1d57
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_item_provider_open_ai_model import AgentV1SettingsAgentSpeakItemProviderOpenAiModel
+from .agent_v1settings_agent_speak_item_provider_open_ai_voice import AgentV1SettingsAgentSpeakItemProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentSpeakItemProviderOpenAi(UniversalBaseModel):
+ model: AgentV1SettingsAgentSpeakItemProviderOpenAiModel = pydantic.Field()
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1SettingsAgentSpeakItemProviderOpenAiVoice = pydantic.Field()
+ """
+ OpenAI voice
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai_model.py
new file mode 100644
index 00000000..4f633312
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai_voice.py
new file mode 100644
index 00000000..d49e2db8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_item_provider_open_ai_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakItemProviderOpenAiVoice = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py
new file mode 100644
index 00000000..7247b0ea
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLength
+from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpoint
+from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItem
+from .agent_v1settings_agent_think_provider import AgentV1SettingsAgentThinkProvider
+
+
+class AgentV1SettingsAgentThink(UniversalBaseModel):
+ provider: AgentV1SettingsAgentThinkProvider
+ endpoint: typing.Optional[AgentV1SettingsAgentThinkEndpoint] = pydantic.Field(default=None)
+ """
+ Optional for non-Deepgram LLM providers. When present, must include url field and headers object
+ """
+
+ functions: typing.Optional[typing.List[AgentV1SettingsAgentThinkFunctionsItem]] = None
+ prompt: typing.Optional[str] = None
+ context_length: typing.Optional[AgentV1SettingsAgentThinkContextLength] = pydantic.Field(default=None)
+ """
+ Specifies the number of characters retained in context between user messages, agent responses, and function calls. This setting is only configurable when a custom think endpoint is used
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_context_length.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_context_length.py
new file mode 100644
index 00000000..daac7703
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_context_length.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkContextLength = typing.Union[typing.Literal["max"], float]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_endpoint.py
new file mode 100644
index 00000000..1e17900b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_endpoint.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentThinkEndpoint(UniversalBaseModel):
+ """
+ Optional for non-Deepgram LLM providers. When present, must include url field and headers object
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom LLM endpoint URL
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None)
+ """
+ Custom headers for the endpoint
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item.py
new file mode 100644
index 00000000..a787f583
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_functions_item_endpoint import AgentV1SettingsAgentThinkFunctionsItemEndpoint
+
+
+class AgentV1SettingsAgentThinkFunctionsItem(UniversalBaseModel):
+ name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Function name
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Function description
+ """
+
+ parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Function parameters
+ """
+
+ endpoint: typing.Optional[AgentV1SettingsAgentThinkFunctionsItemEndpoint] = pydantic.Field(default=None)
+ """
+ The function endpoint to call. If not passed, the function is called client-side
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item_endpoint.py
new file mode 100644
index 00000000..e8e48f39
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item_endpoint.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentThinkFunctionsItemEndpoint(UniversalBaseModel):
+ """
+ The function endpoint to call. If not passed, the function is called client-side
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Endpoint URL
+ """
+
+ method: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ HTTP method
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider.py
new file mode 100644
index 00000000..7ec860c8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_think_provider_credentials import AgentV1SettingsAgentThinkProviderCredentials
+from .agent_v1settings_agent_think_provider_model import AgentV1SettingsAgentThinkProviderModel
+from .agent_v1settings_agent_think_provider_three import AgentV1SettingsAgentThinkProviderThree
+from .agent_v1settings_agent_think_provider_two import AgentV1SettingsAgentThinkProviderTwo
+from .agent_v1settings_agent_think_provider_zero import AgentV1SettingsAgentThinkProviderZero
+
+AgentV1SettingsAgentThinkProvider = typing.Union[
+ AgentV1SettingsAgentThinkProviderZero,
+ AgentV1SettingsAgentThinkProviderCredentials,
+ AgentV1SettingsAgentThinkProviderTwo,
+ AgentV1SettingsAgentThinkProviderThree,
+ AgentV1SettingsAgentThinkProviderModel,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials.py
new file mode 100644
index 00000000..8b323a5c
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_credentials_credentials import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentials,
+)
+from .agent_v1settings_agent_think_provider_credentials_model import AgentV1SettingsAgentThinkProviderCredentialsModel
+
+
+class AgentV1SettingsAgentThinkProviderCredentials(UniversalBaseModel):
+ type: typing.Optional[typing.Literal["aws_bedrock"]] = None
+ model: typing.Optional[AgentV1SettingsAgentThinkProviderCredentialsModel] = pydantic.Field(default=None)
+ """
+ AWS Bedrock model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ AWS Bedrock temperature (0-2)
+ """
+
+ credentials: typing.Optional[AgentV1SettingsAgentThinkProviderCredentialsCredentials] = pydantic.Field(default=None)
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_credentials.py
new file mode 100644
index 00000000..2a059394
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_credentials.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_credentials_credentials_type import (
+ AgentV1SettingsAgentThinkProviderCredentialsCredentialsType,
+)
+
+
+class AgentV1SettingsAgentThinkProviderCredentialsCredentials(UniversalBaseModel):
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ type: typing.Optional[AgentV1SettingsAgentThinkProviderCredentialsCredentialsType] = pydantic.Field(default=None)
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ region: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS region
+ """
+
+ access_key_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS access key
+ """
+
+ secret_access_key: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS secret access key
+ """
+
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS session token (required for STS only)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_credentials_type.py
new file mode 100644
index 00000000..fea822de
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_credentials_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderCredentialsCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_model.py
new file mode 100644
index 00000000..1ac2698e
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_credentials_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderCredentialsModel = typing.Union[
+ typing.Literal["anthropic/claude-3-5-sonnet-20240620-v1:0", "anthropic/claude-3-5-haiku-20240307-v1:0"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_model.py
new file mode 100644
index 00000000..21f947bf
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_model.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentThinkProviderModel(UniversalBaseModel):
+ type: typing.Optional[typing.Literal["groq"]] = None
+ model: typing.Optional[typing.Literal["openai/gpt-oss-20b"]] = pydantic.Field(default=None)
+ """
+ Groq model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Groq temperature (0-2)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_three.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_three.py
new file mode 100644
index 00000000..a87873d0
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_three.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_three_model import AgentV1SettingsAgentThinkProviderThreeModel
+
+
+class AgentV1SettingsAgentThinkProviderThree(UniversalBaseModel):
+ type: typing.Optional[typing.Literal["google"]] = None
+ model: typing.Optional[AgentV1SettingsAgentThinkProviderThreeModel] = pydantic.Field(default=None)
+ """
+ Google model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Google temperature (0-2)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_three_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_three_model.py
new file mode 100644
index 00000000..166399c5
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_three_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderThreeModel = typing.Union[
+ typing.Literal["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemini-2.5-flash"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_two.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_two.py
new file mode 100644
index 00000000..5dd79754
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_two.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_two_model import AgentV1SettingsAgentThinkProviderTwoModel
+
+
+class AgentV1SettingsAgentThinkProviderTwo(UniversalBaseModel):
+ type: typing.Optional[typing.Literal["anthropic"]] = None
+ model: typing.Optional[AgentV1SettingsAgentThinkProviderTwoModel] = pydantic.Field(default=None)
+ """
+ Anthropic model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Anthropic temperature (0-1)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_two_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_two_model.py
new file mode 100644
index 00000000..00f9393c
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_two_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderTwoModel = typing.Union[
+ typing.Literal["claude-3-5-haiku-latest", "claude-sonnet-4-20250514"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_zero.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_zero.py
new file mode 100644
index 00000000..0118390a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_zero.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_zero_model import AgentV1SettingsAgentThinkProviderZeroModel
+
+
+class AgentV1SettingsAgentThinkProviderZero(UniversalBaseModel):
+ type: typing.Optional[typing.Literal["open_ai"]] = None
+ model: typing.Optional[AgentV1SettingsAgentThinkProviderZeroModel] = pydantic.Field(default=None)
+ """
+ OpenAI model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ OpenAI temperature (0-2)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_zero_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_zero_model.py
new file mode 100644
index 00000000..2fd8bf88
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_zero_model.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderZeroModel = typing.Union[
+ typing.Literal[
+ "gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4o", "gpt-4o-mini"
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_applied.py b/src/deepgram/agent/v1/types/agent_v1settings_applied.py
new file mode 100644
index 00000000..a17ad602
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_applied.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsApplied(UniversalBaseModel):
+ type: typing.Literal["SettingsApplied"] = pydantic.Field(default="SettingsApplied")
+ """
+ Message type identifier for settings applied confirmation
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio.py b/src/deepgram/agent/v1/types/agent_v1settings_audio.py
new file mode 100644
index 00000000..29350538
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_audio_input import AgentV1SettingsAudioInput
+from .agent_v1settings_audio_output import AgentV1SettingsAudioOutput
+
+
+class AgentV1SettingsAudio(UniversalBaseModel):
+ input: typing.Optional[AgentV1SettingsAudioInput] = pydantic.Field(default=None)
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ output: typing.Optional[AgentV1SettingsAudioOutput] = pydantic.Field(default=None)
+ """
+ Audio output configuration settings
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_input.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_input.py
new file mode 100644
index 00000000..8b9cae76
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_input.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding
+
+
+class AgentV1SettingsAudioInput(UniversalBaseModel):
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ encoding: AgentV1SettingsAudioInputEncoding = pydantic.Field()
+ """
+ Audio encoding format
+ """
+
+ sample_rate: float = pydantic.Field()
+ """
+ Sample rate in Hz. Common values are 16000, 24000, 44100, 48000
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_input_encoding.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_input_encoding.py
new file mode 100644
index 00000000..232072d3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_input_encoding.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAudioInputEncoding = typing.Union[
+ typing.Literal[
+ "linear16", "linear32", "flac", "alaw", "mulaw", "amr-nb", "amr-wb", "opus", "ogg-opus", "speex", "g729"
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_output.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_output.py
new file mode 100644
index 00000000..e0c0efc8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_output.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding
+
+
+class AgentV1SettingsAudioOutput(UniversalBaseModel):
+ """
+ Audio output configuration settings
+ """
+
+ encoding: typing.Optional[AgentV1SettingsAudioOutputEncoding] = pydantic.Field(default=None)
+ """
+ Audio encoding format for streaming TTS output
+ """
+
+ sample_rate: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Sample rate in Hz
+ """
+
+ bitrate: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Audio bitrate in bits per second
+ """
+
+ container: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Audio container format. If omitted, defaults to 'none'
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_output_encoding.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_output_encoding.py
new file mode 100644
index 00000000..f4b7ca42
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_output_encoding.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAudioOutputEncoding = typing.Union[typing.Literal["linear16", "mulaw", "alaw"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_flags.py b/src/deepgram/agent/v1/types/agent_v1settings_flags.py
new file mode 100644
index 00000000..db3e96cd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_flags.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsFlags(UniversalBaseModel):
+ history: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Enable or disable history message reporting
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1speak_updated.py b/src/deepgram/agent/v1/types/agent_v1speak_updated.py
new file mode 100644
index 00000000..aeba09d8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1speak_updated.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SpeakUpdated(UniversalBaseModel):
+ type: typing.Literal["SpeakUpdated"] = pydantic.Field(default="SpeakUpdated")
+ """
+ Message type identifier for speak update confirmation
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_prompt.py b/src/deepgram/agent/v1/types/agent_v1update_prompt.py
new file mode 100644
index 00000000..a479b01b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_prompt.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UpdatePrompt(UniversalBaseModel):
+ type: typing.Literal["UpdatePrompt"] = pydantic.Field(default="UpdatePrompt")
+ """
+ Message type identifier for prompt update request
+ """
+
+ prompt: str = pydantic.Field()
+ """
+ The new system prompt to be used by the agent
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak.py b/src/deepgram/agent/v1/types/agent_v1update_speak.py
new file mode 100644
index 00000000..f776f945
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeak
+
+
+class AgentV1UpdateSpeak(UniversalBaseModel):
+ type: typing.Literal["UpdateSpeak"] = pydantic.Field(default="UpdateSpeak")
+ """
+ Message type identifier for updating the speak model
+ """
+
+ speak: AgentV1UpdateSpeakSpeak = pydantic.Field()
+ """
+ Configuration for the speak model. Optional, defaults to latest deepgram TTS model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py
new file mode 100644
index 00000000..3ad7b53a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpoint
+from .agent_v1update_speak_speak_provider import AgentV1UpdateSpeakSpeakProvider
+
+
+class AgentV1UpdateSpeakSpeak(UniversalBaseModel):
+ """
+ Configuration for the speak model. Optional, defaults to latest deepgram TTS model
+ """
+
+ provider: AgentV1UpdateSpeakSpeakProvider
+ endpoint: typing.Optional[AgentV1UpdateSpeakSpeakEndpoint] = pydantic.Field(default=None)
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py
new file mode 100644
index 00000000..2c65c454
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UpdateSpeakSpeakEndpoint(UniversalBaseModel):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider.py
new file mode 100644
index 00000000..8d89f941
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider.py
@@ -0,0 +1,105 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+)
+from .agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from .agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+from .agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+from .agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+from .agent_v1update_speak_speak_provider_eleven_labs_model_id import AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+from .agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from .agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProvider_Deepgram(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = "deepgram"
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_ElevenLabs(UniversalBaseModel):
+ type: typing.Literal["eleven_labs"] = "eleven_labs"
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+ language_code: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_Cartesia(UniversalBaseModel):
+ type: typing.Literal["cartesia"] = "cartesia"
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_AwsPolly(UniversalBaseModel):
+ type: typing.Literal["aws_polly"] = "aws_polly"
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ language_code: str
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1UpdateSpeakSpeakProvider = typing.Union[
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly.py
new file mode 100644
index 00000000..b0f91e2d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+)
+from .agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from .agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPolly(UniversalBaseModel):
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice = pydantic.Field()
+ """
+ AWS Polly voice name
+ """
+
+ language_code: str = pydantic.Field()
+ """
+ Language code (e.g., "en-US")
+ """
+
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..0ec682b2
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_aws_polly_credentials_type import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials(UniversalBaseModel):
+ type: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Required for STS only
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials_type.py
new file mode 100644
index 00000000..984051d8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_engine.py
new file mode 100644
index 00000000..2a641f24
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_engine.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderAwsPollyEngine = typing.Union[
+ typing.Literal["generative", "long-form", "standard", "neural"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_voice.py
new file mode 100644
index 00000000..2be92987
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderAwsPollyVoice = typing.Union[
+ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia.py
new file mode 100644
index 00000000..d95a4127
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesia(UniversalBaseModel):
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId = pydantic.Field()
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Cartesia language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_model_id.py
new file mode 100644
index 00000000..0cee24cd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderCartesiaModelId = typing.Union[
+ typing.Literal["sonic-2", "sonic-multilingual"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_voice.py
new file mode 100644
index 00000000..2a6a918f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_voice.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesiaVoice(UniversalBaseModel):
+ mode: str = pydantic.Field()
+ """
+ Cartesia voice mode
+ """
+
+ id: str = pydantic.Field()
+ """
+ Cartesia voice ID
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/listen_v1_utterance_end_event.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram.py
similarity index 55%
rename from src/deepgram/extensions/types/sockets/listen_v1_utterance_end_event.py
rename to src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram.py
index c60f281c..b4376c38 100644
--- a/src/deepgram/extensions/types/sockets/listen_v1_utterance_end_event.py
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram.py
@@ -1,28 +1,22 @@
-# Listen V1 Utterance End Event - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
import pydantic
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
-class ListenV1UtteranceEndEvent(UniversalBaseModel):
+class AgentV1UpdateSpeakSpeakProviderDeepgram(UniversalBaseModel):
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel = pydantic.Field()
"""
- An utterance has ended
+ Deepgram TTS model
"""
-
- type: typing.Literal["UtteranceEnd"]
- """Message type identifier"""
-
- channel: typing.List[int]
- """The channel"""
-
- last_word_end: float
- """The last word end"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram_model.py
new file mode 100644
index 00000000..2e0a9ab9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram_model.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderDeepgramModel = typing.Union[
+ typing.Literal[
+ "aura-asteria-en",
+ "aura-luna-en",
+ "aura-stella-en",
+ "aura-athena-en",
+ "aura-hera-en",
+ "aura-orion-en",
+ "aura-arcas-en",
+ "aura-perseus-en",
+ "aura-angus-en",
+ "aura-orpheus-en",
+ "aura-helios-en",
+ "aura-zeus-en",
+ "aura-2-amalthea-en",
+ "aura-2-andromeda-en",
+ "aura-2-apollo-en",
+ "aura-2-arcas-en",
+ "aura-2-aries-en",
+ "aura-2-asteria-en",
+ "aura-2-athena-en",
+ "aura-2-atlas-en",
+ "aura-2-aurora-en",
+ "aura-2-callista-en",
+ "aura-2-cora-en",
+ "aura-2-cordelia-en",
+ "aura-2-delia-en",
+ "aura-2-draco-en",
+ "aura-2-electra-en",
+ "aura-2-harmonia-en",
+ "aura-2-helena-en",
+ "aura-2-hera-en",
+ "aura-2-hermes-en",
+ "aura-2-hyperion-en",
+ "aura-2-iris-en",
+ "aura-2-janus-en",
+ "aura-2-juno-en",
+ "aura-2-jupiter-en",
+ "aura-2-luna-en",
+ "aura-2-mars-en",
+ "aura-2-minerva-en",
+ "aura-2-neptune-en",
+ "aura-2-odysseus-en",
+ "aura-2-ophelia-en",
+ "aura-2-orion-en",
+ "aura-2-orpheus-en",
+ "aura-2-pandora-en",
+ "aura-2-phoebe-en",
+ "aura-2-pluto-en",
+ "aura-2-saturn-en",
+ "aura-2-selene-en",
+ "aura-2-thalia-en",
+ "aura-2-theia-en",
+ "aura-2-vesta-en",
+ "aura-2-zeus-en",
+ "aura-2-sirio-es",
+ "aura-2-nestor-es",
+ "aura-2-carina-es",
+ "aura-2-celeste-es",
+ "aura-2-alvaro-es",
+ "aura-2-diana-es",
+ "aura-2-aquila-es",
+ "aura-2-selena-es",
+ "aura-2-estrella-es",
+ "aura-2-javier-es",
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs.py
new file mode 100644
index 00000000..7f9f994a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_eleven_labs_model_id import AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+
+
+class AgentV1UpdateSpeakSpeakProviderElevenLabs(UniversalBaseModel):
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId = pydantic.Field()
+ """
+ Eleven Labs model ID
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Eleven Labs optional language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs_model_id.py
new file mode 100644
index 00000000..fdbba96c
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderElevenLabsModelId = typing.Union[
+ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai.py
new file mode 100644
index 00000000..fcd28a96
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from .agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderOpenAi(UniversalBaseModel):
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel = pydantic.Field()
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice = pydantic.Field()
+ """
+ OpenAI voice
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_model.py
new file mode 100644
index 00000000..94c2069a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_voice.py
new file mode 100644
index 00000000..bc5fdeb9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderOpenAiVoice = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py b/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py
new file mode 100644
index 00000000..ac4d838a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UserStartedSpeaking(UniversalBaseModel):
+ type: typing.Literal["UserStartedSpeaking"] = pydantic.Field(default="UserStartedSpeaking")
+ """
+ Message type identifier indicating that the user has begun speaking
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1warning.py b/src/deepgram/agent/v1/types/agent_v1warning.py
new file mode 100644
index 00000000..cdfacea7
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1warning.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1Warning(UniversalBaseModel):
+ """
+ Notifies the client of non-fatal errors or warnings
+ """
+
+ type: typing.Literal["Warning"] = pydantic.Field(default="Warning")
+ """
+ Message type identifier for warnings
+ """
+
+ description: str = pydantic.Field()
+ """
+ Description of the warning
+ """
+
+ code: str = pydantic.Field()
+ """
+ Warning code identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1welcome.py b/src/deepgram/agent/v1/types/agent_v1welcome.py
new file mode 100644
index 00000000..972f8104
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1welcome.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1Welcome(UniversalBaseModel):
+ type: typing.Literal["Welcome"] = pydantic.Field(default="Welcome")
+ """
+ Message type identifier for welcome message
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ Unique identifier for the request
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/client.py b/src/deepgram/client.py
deleted file mode 100644
index 456d3129..00000000
--- a/src/deepgram/client.py
+++ /dev/null
@@ -1,258 +0,0 @@
-"""
-Custom client entrypoints that extend the generated BaseClient/AsyncBaseClient.
-
-Adds support for `access_token` alongside `api_key` with the following rules:
-- If `access_token` is provided, it takes precedence and sets `Authorization: bearer `
-- When `access_token` is used, `api_key` is forced to "token" to satisfy the generator,
- but the Authorization header is overridden for all HTTP and WebSocket requests.
-"""
-
-import os
-import platform
-import sys
-import types
-import uuid
-from typing import Any, Dict, Optional
-
-from .base_client import AsyncBaseClient, BaseClient
-
-from deepgram.core.client_wrapper import BaseClientWrapper
-from deepgram.extensions.core.instrumented_http import InstrumentedAsyncHttpClient, InstrumentedHttpClient
-from deepgram.extensions.core.instrumented_socket import apply_websocket_instrumentation
-from deepgram.extensions.core.telemetry_events import TelemetryHttpEvents, TelemetrySocketEvents
-from deepgram.extensions.telemetry.batching_handler import BatchingTelemetryHandler
-from deepgram.extensions.telemetry.handler import TelemetryHandler
-from deepgram.extensions.telemetry.proto_encoder import encode_telemetry_batch
-
-
-def _create_telemetry_context(session_id: str) -> Dict[str, Any]:
- """Create telemetry context with SDK and environment information."""
- try:
- # Get package version
- try:
- from . import version
- package_version = version.__version__
- except ImportError:
- package_version = "unknown"
-
- return {
- "package_name": "python-sdk",
- "package_version": package_version,
- "language": "python",
- "runtime_version": f"python {sys.version.split()[0]}",
- "os": platform.system().lower(),
- "arch": platform.machine(),
- "session_id": session_id,
- "environment": os.getenv("DEEPGRAM_ENV", "prod"),
- }
- except Exception:
- # Fallback minimal context
- return {
- "package_name": "python-sdk",
- "language": "python",
- "session_id": session_id,
- }
-
-
-def _setup_telemetry(
- session_id: str,
- telemetry_opt_out: bool,
- telemetry_handler: Optional[TelemetryHandler],
- client_wrapper: BaseClientWrapper,
-) -> Optional[TelemetryHandler]:
- """Setup telemetry for the client."""
- if telemetry_opt_out:
- return None
-
- # Use provided handler or create default batching handler
- if telemetry_handler is None:
- try:
- context = _create_telemetry_context(session_id)
- telemetry_handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.dx.deepgram.com/v1/telemetry",
- api_key=client_wrapper.api_key,
- context_provider=lambda: context,
- synchronous=True, # Use synchronous mode for reliability in short-lived scripts
- batch_size=1, # Send immediately for short-lived scripts
- encode_batch=encode_telemetry_batch, # Add proto encoder
- )
- except Exception:
- # If we can't create the handler, disable telemetry
- return None
-
- # Setup HTTP instrumentation
- try:
- http_events = TelemetryHttpEvents(telemetry_handler)
-
- # Replace the HTTP client with instrumented version
- if hasattr(client_wrapper, 'httpx_client'):
- original_client = client_wrapper.httpx_client
- if hasattr(original_client, 'httpx_client'): # It's already our HttpClient
- instrumented_client = InstrumentedHttpClient(
- delegate=original_client,
- events=http_events,
- )
- client_wrapper.httpx_client = instrumented_client
- except Exception:
- # If instrumentation fails, continue without it
- pass
-
- # Setup WebSocket instrumentation
- try:
- socket_events = TelemetrySocketEvents(telemetry_handler)
- # Apply WebSocket instrumentation to capture connections in generated code
- apply_websocket_instrumentation(socket_events)
- except Exception:
- # If WebSocket instrumentation fails, continue without it
- pass
-
- return telemetry_handler
-
-
-def _setup_async_telemetry(
- session_id: str,
- telemetry_opt_out: bool,
- telemetry_handler: Optional[TelemetryHandler],
- client_wrapper: BaseClientWrapper,
-) -> Optional[TelemetryHandler]:
- """Setup telemetry for the async client."""
- if telemetry_opt_out:
- return None
-
- # Use provided handler or create default batching handler
- if telemetry_handler is None:
- try:
- context = _create_telemetry_context(session_id)
- telemetry_handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.dx.deepgram.com/v1/telemetry",
- api_key=client_wrapper.api_key,
- context_provider=lambda: context,
- synchronous=True, # Use synchronous mode for reliability in short-lived scripts
- batch_size=1, # Send immediately for short-lived scripts
- encode_batch=encode_telemetry_batch, # Add proto encoder
- )
- except Exception:
- # If we can't create the handler, disable telemetry
- return None
-
- # Setup HTTP instrumentation
- try:
- http_events = TelemetryHttpEvents(telemetry_handler)
-
- # Replace the HTTP client with instrumented version
- if hasattr(client_wrapper, 'httpx_client'):
- original_client = client_wrapper.httpx_client
- if hasattr(original_client, 'httpx_client'): # It's already our AsyncHttpClient
- instrumented_client = InstrumentedAsyncHttpClient(
- delegate=original_client,
- events=http_events,
- )
- client_wrapper.httpx_client = instrumented_client
- except Exception:
- # If instrumentation fails, continue without it
- pass
-
- # Setup WebSocket instrumentation
- try:
- socket_events = TelemetrySocketEvents(telemetry_handler)
- # Apply WebSocket instrumentation to capture connections in generated code
- apply_websocket_instrumentation(socket_events)
- except Exception:
- # If WebSocket instrumentation fails, continue without it
- pass
-
- return telemetry_handler
-
-
-def _apply_bearer_authorization_override(client_wrapper: BaseClientWrapper, bearer_token: str) -> None:
- """Override header providers to always use a Bearer authorization token.
-
- This updates both:
- - client_wrapper.get_headers() used by WebSocket clients
- - client_wrapper.httpx_client.base_headers used by HTTP clients
- """
- original_get_headers = client_wrapper.get_headers
-
- def _get_headers_with_bearer(_self: Any) -> Dict[str, str]:
- headers = original_get_headers()
- headers["Authorization"] = f"bearer {bearer_token}"
- return headers
-
- # Override on wrapper for WebSockets
- client_wrapper.get_headers = types.MethodType(_get_headers_with_bearer, client_wrapper) # type: ignore[method-assign]
-
- # Override on HTTP client for REST requests
- if hasattr(client_wrapper, "httpx_client") and hasattr(client_wrapper.httpx_client, "base_headers"):
- client_wrapper.httpx_client.base_headers = client_wrapper.get_headers
-
-class DeepgramClient(BaseClient):
- def __init__(self, *args, **kwargs) -> None:
- access_token: Optional[str] = kwargs.pop("access_token", None)
- telemetry_opt_out: bool = bool(kwargs.pop("telemetry_opt_out", True))
- telemetry_handler: Optional[TelemetryHandler] = kwargs.pop("telemetry_handler", None)
-
- # Generate a session id up-front so it can be placed into headers for all transports
- generated_session_id = str(uuid.uuid4())
-
- # Ensure headers object exists for pass-through custom headers
- headers: Optional[Dict[str, str]] = kwargs.get("headers")
- if headers is None:
- headers = {}
- kwargs["headers"] = headers
-
- # Ensure every request has a session identifier header
- headers["x-deepgram-session-id"] = generated_session_id
-
- # If an access_token is provided, force api_key to a placeholder that will be overridden
- if access_token is not None:
- kwargs["api_key"] = "token"
-
- super().__init__(*args, **kwargs)
- self.session_id = generated_session_id
-
- if access_token is not None:
- _apply_bearer_authorization_override(self._client_wrapper, access_token)
-
- # Setup telemetry
- self._telemetry_handler = _setup_telemetry(
- session_id=generated_session_id,
- telemetry_opt_out=telemetry_opt_out,
- telemetry_handler=telemetry_handler,
- client_wrapper=self._client_wrapper,
- )
-
-class AsyncDeepgramClient(AsyncBaseClient):
- def __init__(self, *args, **kwargs) -> None:
- access_token: Optional[str] = kwargs.pop("access_token", None)
- telemetry_opt_out: bool = bool(kwargs.pop("telemetry_opt_out", True))
- telemetry_handler: Optional[TelemetryHandler] = kwargs.pop("telemetry_handler", None)
-
- # Generate a session id up-front so it can be placed into headers for all transports
- generated_session_id = str(uuid.uuid4())
-
- # Ensure headers object exists for pass-through custom headers
- headers: Optional[Dict[str, str]] = kwargs.get("headers")
- if headers is None:
- headers = {}
- kwargs["headers"] = headers
-
- # Ensure every request has a session identifier header
- headers["x-deepgram-session-id"] = generated_session_id
-
- # If an access_token is provided, force api_key to a placeholder that will be overridden
- if access_token is not None:
- kwargs["api_key"] = "token"
-
- super().__init__(*args, **kwargs)
- self.session_id = generated_session_id
-
- if access_token is not None:
- _apply_bearer_authorization_override(self._client_wrapper, access_token)
-
- # Setup telemetry
- self._telemetry_handler = _setup_async_telemetry(
- session_id=generated_session_id,
- telemetry_opt_out=telemetry_opt_out,
- telemetry_handler=telemetry_handler,
- client_wrapper=self._client_wrapper,
- )
\ No newline at end of file
diff --git a/src/deepgram/core/client_wrapper.py b/src/deepgram/core/client_wrapper.py
index 806d0202..24b4997b 100644
--- a/src/deepgram/core/client_wrapper.py
+++ b/src/deepgram/core/client_wrapper.py
@@ -23,11 +23,10 @@ def __init__(
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
+ "User-Agent": "deepgram-sdk/5.3.1",
"X-Fern-Language": "Python",
- "X-Fern-SDK-Name": "deepgram",
- # x-release-please-start-version
- "X-Fern-SDK-Version": "5.3.0",
- # x-release-please-end
+ "X-Fern-SDK-Name": "deepgram-sdk",
+ "X-Fern-SDK-Version": "5.3.1",
**(self.get_custom_headers() or {}),
}
headers["Authorization"] = f"Token {self.api_key}"
diff --git a/src/deepgram/core/http_sse/__init__.py b/src/deepgram/core/http_sse/__init__.py
new file mode 100644
index 00000000..730e5a33
--- /dev/null
+++ b/src/deepgram/core/http_sse/__init__.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from ._api import EventSource, aconnect_sse, connect_sse
+ from ._exceptions import SSEError
+ from ._models import ServerSentEvent
+_dynamic_imports: typing.Dict[str, str] = {
+ "EventSource": "._api",
+ "SSEError": "._exceptions",
+ "ServerSentEvent": "._models",
+ "aconnect_sse": "._api",
+ "connect_sse": "._api",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = ["EventSource", "SSEError", "ServerSentEvent", "aconnect_sse", "connect_sse"]
diff --git a/src/deepgram/core/http_sse/_api.py b/src/deepgram/core/http_sse/_api.py
new file mode 100644
index 00000000..f900b3b6
--- /dev/null
+++ b/src/deepgram/core/http_sse/_api.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import re
+from contextlib import asynccontextmanager, contextmanager
+from typing import Any, AsyncGenerator, AsyncIterator, Iterator, cast
+
+import httpx
+from ._decoders import SSEDecoder
+from ._exceptions import SSEError
+from ._models import ServerSentEvent
+
+
+class EventSource:
+ def __init__(self, response: httpx.Response) -> None:
+ self._response = response
+
+ def _check_content_type(self) -> None:
+ content_type = self._response.headers.get("content-type", "").partition(";")[0]
+ if "text/event-stream" not in content_type:
+ raise SSEError(
+ f"Expected response header Content-Type to contain 'text/event-stream', got {content_type!r}"
+ )
+
+ def _get_charset(self) -> str:
+ """Extract charset from Content-Type header, fallback to UTF-8."""
+ content_type = self._response.headers.get("content-type", "")
+
+ # Parse charset parameter using regex
+ charset_match = re.search(r"charset=([^;\s]+)", content_type, re.IGNORECASE)
+ if charset_match:
+ charset = charset_match.group(1).strip("\"'")
+ # Validate that it's a known encoding
+ try:
+ # Test if the charset is valid by trying to encode/decode
+ "test".encode(charset).decode(charset)
+ return charset
+ except (LookupError, UnicodeError):
+ # If charset is invalid, fall back to UTF-8
+ pass
+
+ # Default to UTF-8 if no charset specified or invalid charset
+ return "utf-8"
+
+ @property
+ def response(self) -> httpx.Response:
+ return self._response
+
+ def iter_sse(self) -> Iterator[ServerSentEvent]:
+ self._check_content_type()
+ decoder = SSEDecoder()
+ charset = self._get_charset()
+
+ buffer = ""
+ for chunk in self._response.iter_bytes():
+ # Decode chunk using detected charset
+ text_chunk = chunk.decode(charset, errors="replace")
+ buffer += text_chunk
+
+ # Process complete lines
+ while "\n" in buffer:
+ line, buffer = buffer.split("\n", 1)
+ line = line.rstrip("\r")
+ sse = decoder.decode(line)
+ # when we reach a "\n\n" => line = ''
+ # => decoder will attempt to return an SSE Event
+ if sse is not None:
+ yield sse
+
+ # Process any remaining data in buffer
+ if buffer.strip():
+ line = buffer.rstrip("\r")
+ sse = decoder.decode(line)
+ if sse is not None:
+ yield sse
+
+ async def aiter_sse(self) -> AsyncGenerator[ServerSentEvent, None]:
+ self._check_content_type()
+ decoder = SSEDecoder()
+ lines = cast(AsyncGenerator[str, None], self._response.aiter_lines())
+ try:
+ async for line in lines:
+ line = line.rstrip("\n")
+ sse = decoder.decode(line)
+ if sse is not None:
+ yield sse
+ finally:
+ await lines.aclose()
+
+
+@contextmanager
+def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: Any) -> Iterator[EventSource]:
+ headers = kwargs.pop("headers", {})
+ headers["Accept"] = "text/event-stream"
+ headers["Cache-Control"] = "no-store"
+
+ with client.stream(method, url, headers=headers, **kwargs) as response:
+ yield EventSource(response)
+
+
+@asynccontextmanager
+async def aconnect_sse(
+ client: httpx.AsyncClient,
+ method: str,
+ url: str,
+ **kwargs: Any,
+) -> AsyncIterator[EventSource]:
+ headers = kwargs.pop("headers", {})
+ headers["Accept"] = "text/event-stream"
+ headers["Cache-Control"] = "no-store"
+
+ async with client.stream(method, url, headers=headers, **kwargs) as response:
+ yield EventSource(response)
diff --git a/src/deepgram/core/http_sse/_decoders.py b/src/deepgram/core/http_sse/_decoders.py
new file mode 100644
index 00000000..339b0890
--- /dev/null
+++ b/src/deepgram/core/http_sse/_decoders.py
@@ -0,0 +1,61 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from typing import List, Optional
+
+from ._models import ServerSentEvent
+
+
+class SSEDecoder:
+ def __init__(self) -> None:
+ self._event = ""
+ self._data: List[str] = []
+ self._last_event_id = ""
+ self._retry: Optional[int] = None
+
+ def decode(self, line: str) -> Optional[ServerSentEvent]:
+ # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501
+
+ if not line:
+ if not self._event and not self._data and not self._last_event_id and self._retry is None:
+ return None
+
+ sse = ServerSentEvent(
+ event=self._event,
+ data="\n".join(self._data),
+ id=self._last_event_id,
+ retry=self._retry,
+ )
+
+ # NOTE: as per the SSE spec, do not reset last_event_id.
+ self._event = ""
+ self._data = []
+ self._retry = None
+
+ return sse
+
+ if line.startswith(":"):
+ return None
+
+ fieldname, _, value = line.partition(":")
+
+ if value.startswith(" "):
+ value = value[1:]
+
+ if fieldname == "event":
+ self._event = value
+ elif fieldname == "data":
+ self._data.append(value)
+ elif fieldname == "id":
+ if "\0" in value:
+ pass
+ else:
+ self._last_event_id = value
+ elif fieldname == "retry":
+ try:
+ self._retry = int(value)
+ except (TypeError, ValueError):
+ pass
+ else:
+ pass # Field is ignored.
+
+ return None
diff --git a/src/deepgram/core/http_sse/_exceptions.py b/src/deepgram/core/http_sse/_exceptions.py
new file mode 100644
index 00000000..81605a8a
--- /dev/null
+++ b/src/deepgram/core/http_sse/_exceptions.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import httpx
+
+
+class SSEError(httpx.TransportError):
+ pass
diff --git a/src/deepgram/core/http_sse/_models.py b/src/deepgram/core/http_sse/_models.py
new file mode 100644
index 00000000..1af57f8f
--- /dev/null
+++ b/src/deepgram/core/http_sse/_models.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import json
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass(frozen=True)
+class ServerSentEvent:
+ event: str = "message"
+ data: str = ""
+ id: str = ""
+ retry: Optional[int] = None
+
+ def json(self) -> Any:
+ """Parse the data field as JSON."""
+ return json.loads(self.data)
diff --git a/src/deepgram/core/pydantic_utilities.py b/src/deepgram/core/pydantic_utilities.py
index 8906cdfa..185e5c4f 100644
--- a/src/deepgram/core/pydantic_utilities.py
+++ b/src/deepgram/core/pydantic_utilities.py
@@ -220,7 +220,9 @@ def universal_root_validator(
) -> Callable[[AnyCallable], AnyCallable]:
def decorator(func: AnyCallable) -> AnyCallable:
if IS_PYDANTIC_V2:
- return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func)) # type: ignore[attr-defined]
+ # In Pydantic v2, for RootModel we always use "before" mode
+ # The custom validators transform the input value before the model is created
+ return cast(AnyCallable, pydantic.model_validator(mode="before")(func)) # type: ignore[attr-defined]
return cast(AnyCallable, pydantic.root_validator(pre=pre)(func)) # type: ignore[call-overload]
return decorator
diff --git a/src/deepgram/extensions/__init__.py b/src/deepgram/extensions/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/deepgram/extensions/core/__init__.py b/src/deepgram/extensions/core/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/deepgram/extensions/core/instrumented_http.py b/src/deepgram/extensions/core/instrumented_http.py
deleted file mode 100644
index 214683ac..00000000
--- a/src/deepgram/extensions/core/instrumented_http.py
+++ /dev/null
@@ -1,395 +0,0 @@
-from __future__ import annotations
-
-import time
-import typing
-
-import httpx
-from ...core.file import File
-from ...core.http_client import AsyncHttpClient as GeneratedAsyncHttpClient
-from ...core.http_client import HttpClient as GeneratedHttpClient
-from ...core.request_options import RequestOptions
-
-
-class HttpEvents(typing.Protocol):
- def on_http_request(
- self,
- *,
- method: str,
- url: str,
- headers: typing.Union[typing.Mapping[str, str], None],
- extras: typing.Union[typing.Mapping[str, str], None] = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None: ...
-
- def on_http_response(
- self,
- *,
- method: str,
- url: str,
- status_code: int,
- duration_ms: float,
- headers: typing.Union[typing.Mapping[str, str], None],
- extras: typing.Union[typing.Mapping[str, str], None] = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None: ...
-
- def on_http_error(
- self,
- *,
- method: str,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None: ...
-
-
-def _compose_url(base_url: typing.Optional[str], path: typing.Optional[str]) -> str:
- if base_url is None or path is None:
- return ""
- return f"{base_url}/{path}" if not str(base_url).endswith("/") else f"{base_url}{path}"
-
-
-class InstrumentedHttpClient(GeneratedHttpClient):
- def __init__(self, *, delegate: GeneratedHttpClient, events: HttpEvents | None):
- super().__init__(
- httpx_client=delegate.httpx_client,
- base_timeout=delegate.base_timeout,
- base_headers=delegate.base_headers,
- base_url=delegate.base_url,
- )
- self._delegate = delegate
- self._events = events
-
- def request(
- self,
- path: typing.Optional[str] = None,
- *,
- method: str,
- base_url: typing.Optional[str] = None,
- params: typing.Optional[typing.Dict[str, typing.Any]] = None,
- json: typing.Optional[typing.Any] = None,
- data: typing.Optional[typing.Any] = None,
- content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
- files: typing.Optional[
- typing.Union[
- typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]],
- typing.List[typing.Tuple[str, File]],
- ]
- ] = None,
- headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
- request_options: typing.Optional[RequestOptions] = None,
- retries: int = 2,
- omit: typing.Optional[typing.Any] = None,
- force_multipart: typing.Optional[bool] = None,
- ) -> httpx.Response:
- url = _compose_url(base_url, path)
-
- start = time.perf_counter()
- try:
- if self._events is not None:
- # Filter request headers for telemetry extras
- try:
- from .telemetry_events import (
- capture_request_details,
- # filter_sensitive_headers, # No longer needed - using privacy-focused capture
- )
- # No longer filter headers - use privacy-focused request_details instead
- extras = None
- request_details = capture_request_details(
- method=method,
- url=url,
- headers=headers,
- params=params,
- json=json,
- data=data,
- files=files,
- request_options=request_options,
- retries=retries,
- omit=omit,
- force_multipart=force_multipart,
- )
- except Exception:
- extras = None
- request_details = None
-
- self._events.on_http_request(
- method=method,
- url=url or "",
- headers=headers,
- extras=extras,
- request_details=request_details,
- )
- except Exception:
- pass
- try:
- resp = super().request(
- path=path,
- method=method,
- base_url=base_url,
- params=params,
- json=json,
- data=data,
- content=content,
- files=files,
- headers=headers,
- request_options=request_options,
- retries=retries,
- omit=omit,
- force_multipart=force_multipart,
- )
- duration_ms = (time.perf_counter() - start) * 1000.0
- try:
- if self._events is not None:
- response_headers = typing.cast(typing.Union[typing.Mapping[str, str], None], getattr(resp, "headers", None))
- # Filter response headers for telemetry extras
- try:
- from .telemetry_events import (
- capture_response_details,
- # filter_sensitive_headers, # No longer needed - using privacy-focused capture
- )
- # No longer filter response headers - use privacy-focused response_details instead
- extras = None
- response_details = capture_response_details(resp)
- except Exception:
- extras = None
- response_details = None
-
- self._events.on_http_response(
- method=method,
- url=url or "",
- status_code=resp.status_code,
- duration_ms=duration_ms,
- headers=response_headers,
- extras=extras,
- response_details=response_details,
- )
- except Exception:
- pass
- return resp
- except Exception as exc:
- duration_ms = (time.perf_counter() - start) * 1000.0
- try:
- if self._events is not None:
- # Capture comprehensive error details
- try:
- from .telemetry_events import (
- capture_request_details,
- capture_response_details,
- )
-
- # Capture full request details
- request_details = capture_request_details(
- method=method,
- url=url,
- headers=headers,
- params=params,
- json=json,
- data=data,
- files=files,
- request_options=request_options,
- retries=retries,
- omit=omit,
- force_multipart=force_multipart,
- )
-
- # Try to capture response details from exception
- response_details = {}
- if hasattr(exc, 'response'):
- response_details = capture_response_details(exc.response)
- elif hasattr(exc, 'status_code'):
- response_details['status_code'] = getattr(exc, 'status_code', None)
- if hasattr(exc, 'headers'):
- response_details['headers'] = dict(getattr(exc, 'headers', {}))
-
- except Exception:
- request_details = None
- response_details = None
-
- self._events.on_http_error(
- method=method,
- url=url or "",
- error=exc,
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
- raise
-
- # Inherit stream() from base class without modification
-
-
-class InstrumentedAsyncHttpClient(GeneratedAsyncHttpClient):
- def __init__(self, *, delegate: GeneratedAsyncHttpClient, events: HttpEvents | None):
- super().__init__(
- httpx_client=delegate.httpx_client,
- base_timeout=delegate.base_timeout,
- base_headers=delegate.base_headers,
- base_url=delegate.base_url,
- )
- self._delegate = delegate
- self._events = events
-
- async def request(
- self,
- path: typing.Optional[str] = None,
- *,
- method: str,
- base_url: typing.Optional[str] = None,
- params: typing.Optional[typing.Dict[str, typing.Any]] = None,
- json: typing.Optional[typing.Any] = None,
- data: typing.Optional[typing.Any] = None,
- content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
- files: typing.Optional[
- typing.Union[
- typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]],
- typing.List[typing.Tuple[str, File]],
- ]
- ] = None,
- headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
- request_options: typing.Optional[RequestOptions] = None,
- retries: int = 2,
- omit: typing.Optional[typing.Any] = None,
- force_multipart: typing.Optional[bool] = None,
- ) -> httpx.Response:
- url = _compose_url(base_url, path)
-
- start = time.perf_counter()
- try:
- if self._events is not None:
- # Filter request headers for telemetry extras
- try:
- from .telemetry_events import (
- capture_request_details,
- # filter_sensitive_headers, # No longer needed - using privacy-focused capture
- )
- # No longer filter headers - use privacy-focused request_details instead
- extras = None
- request_details = capture_request_details(
- method=method,
- url=url,
- headers=headers,
- params=params,
- json=json,
- data=data,
- files=files,
- request_options=request_options,
- retries=retries,
- omit=omit,
- force_multipart=force_multipart,
- )
- except Exception:
- extras = None
- request_details = None
-
- self._events.on_http_request(
- method=method,
- url=url or "",
- headers=headers,
- extras=extras,
- request_details=request_details,
- )
- except Exception:
- pass
- try:
- resp = await super().request(
- path=path,
- method=method,
- base_url=base_url,
- params=params,
- json=json,
- data=data,
- content=content,
- files=files,
- headers=headers,
- request_options=request_options,
- retries=retries,
- omit=omit,
- force_multipart=force_multipart,
- )
- duration_ms = (time.perf_counter() - start) * 1000.0
- try:
- if self._events is not None:
- response_headers = typing.cast(typing.Union[typing.Mapping[str, str], None], getattr(resp, "headers", None))
- # Filter response headers for telemetry extras
- try:
- from .telemetry_events import (
- capture_response_details,
- # filter_sensitive_headers, # No longer needed - using privacy-focused capture
- )
- # No longer filter response headers - use privacy-focused response_details instead
- extras = None
- response_details = capture_response_details(resp)
- except Exception:
- extras = None
- response_details = None
-
- self._events.on_http_response(
- method=method,
- url=url or "",
- status_code=resp.status_code,
- duration_ms=duration_ms,
- headers=response_headers,
- extras=extras,
- response_details=response_details,
- )
- except Exception:
- pass
- return resp
- except Exception as exc:
- duration_ms = (time.perf_counter() - start) * 1000.0
- try:
- if self._events is not None:
- # Capture comprehensive error details
- try:
- from .telemetry_events import (
- capture_request_details,
- capture_response_details,
- )
-
- # Capture full request details
- request_details = capture_request_details(
- method=method,
- url=url,
- headers=headers,
- params=params,
- json=json,
- data=data,
- files=files,
- request_options=request_options,
- retries=retries,
- omit=omit,
- force_multipart=force_multipart,
- )
-
- # Try to capture response details from exception
- response_details = {}
- if hasattr(exc, 'response'):
- response_details = capture_response_details(exc.response)
- elif hasattr(exc, 'status_code'):
- response_details['status_code'] = getattr(exc, 'status_code', None)
- if hasattr(exc, 'headers'):
- response_details['headers'] = dict(getattr(exc, 'headers', {}))
-
- except Exception:
- request_details = None
- response_details = None
-
- self._events.on_http_error(
- method=method,
- url=url or "",
- error=exc,
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
- raise
-
- # Inherit stream() from base class without modification
-
-
diff --git a/src/deepgram/extensions/core/instrumented_socket.py b/src/deepgram/extensions/core/instrumented_socket.py
deleted file mode 100644
index 128bec9d..00000000
--- a/src/deepgram/extensions/core/instrumented_socket.py
+++ /dev/null
@@ -1,407 +0,0 @@
-"""
-Instrumented WebSocket clients for telemetry.
-
-This module provides WebSocket client wrappers that automatically capture
-telemetry events, following the same pattern as instrumented_http.py.
-"""
-
-import functools
-import time
-import typing
-from contextlib import asynccontextmanager, contextmanager
-from typing import Union
-
-import websockets.exceptions
-import websockets.sync.client as websockets_sync_client
-
-try:
- from websockets.legacy.client import connect as websockets_client_connect # type: ignore
-except ImportError:
- from websockets import connect as websockets_client_connect # type: ignore
-
-try:
- import websockets.sync.connection as websockets_sync_connection
- from websockets.legacy.client import WebSocketClientProtocol # type: ignore
-except ImportError:
- try:
- import websockets.sync.connection as websockets_sync_connection
- from websockets import WebSocketClientProtocol # type: ignore
- except ImportError:
- # Fallback types
- WebSocketClientProtocol = typing.Any # type: ignore[misc,assignment]
- websockets_sync_connection = typing.Any # type: ignore[misc,assignment]
-
-
-class SocketEvents(typing.Protocol):
- """Protocol for WebSocket telemetry events."""
-
- def on_ws_connect(
- self,
- *,
- url: str,
- headers: Union[typing.Mapping[str, str], None] = None,
- extras: Union[typing.Mapping[str, str], None] = None,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None: ...
-
- def on_ws_error(
- self,
- *,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- response_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None: ...
-
- def on_ws_close(
- self,
- *,
- url: str,
- duration_ms: float,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- response_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None: ...
-
-
-def _capture_request_details(method: str, url: str, headers: Union[typing.Dict[str, str], None] = None, **kwargs) -> typing.Dict[str, typing.Any]:
- """Capture request details for telemetry (avoiding circular import)."""
- details: typing.Dict[str, typing.Any] = {
- "method": method,
- "url": url,
- }
- if headers:
- details["headers"] = dict(headers)
-
- # Add connection parameters for WebSocket requests
- for key, value in kwargs.items():
- if value is not None:
- details[key] = value
-
- return details
-
-
-def _capture_response_details(**kwargs) -> typing.Dict[str, typing.Any]:
- """Capture response details for telemetry (avoiding circular import)."""
- details = {}
- for key, value in kwargs.items():
- if value is not None:
- details[key] = value
- return details
-
-
-def _instrument_sync_connect(original_connect, events: Union[SocketEvents, None] = None):
- """Wrap sync websockets.sync.client.connect to add telemetry."""
-
- @functools.wraps(original_connect)
- def instrumented_connect(uri, *args, additional_headers: Union[typing.Dict[str, str], None] = None, **kwargs):
- start_time = time.perf_counter()
-
- # Capture detailed request information including all connection parameters
- request_details = _capture_request_details(
- method="WS_CONNECT",
- url=str(uri),
- headers=additional_headers,
- function_name="websockets.sync.client.connect",
- connection_args=args,
- connection_kwargs=kwargs,
- )
-
- # Emit connect event
- if events:
- try:
- events.on_ws_connect(
- url=str(uri),
- headers=additional_headers,
- request_details=request_details,
- )
- except Exception:
- pass
-
- try:
- # Call original connect
- connection = original_connect(uri, *args, additional_headers=additional_headers, **kwargs)
-
- # Wrap the connection to capture close event
- if events:
- original_close = connection.close
-
- def instrumented_close(*close_args, **close_kwargs):
- duration_ms = (time.perf_counter() - start_time) * 1000
- response_details = _capture_response_details(
- status_code=1000, # Normal close
- duration_ms=duration_ms
- )
-
- try:
- events.on_ws_close(
- url=str(uri),
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
-
- return original_close(*close_args, **close_kwargs)
-
- connection.close = instrumented_close
-
- return connection
-
- except Exception as error:
- import traceback
-
- duration_ms = (time.perf_counter() - start_time) * 1000
-
- # Capture detailed error information
- response_details = _capture_response_details(
- error=error,
- duration_ms=duration_ms,
- error_type=type(error).__name__,
- error_message=str(error),
- stack_trace=traceback.format_exc(),
- function_name="websockets.sync.client.connect",
- timeout_occurred="timeout" in str(error).lower() or "timed out" in str(error).lower(),
- )
-
- # Capture WebSocket handshake response headers if available
- try:
- # Handle InvalidStatusCode exceptions (handshake failures)
- if error.__class__.__name__ == 'InvalidStatusCode':
- # Status code is directly available
- if hasattr(error, 'status_code'):
- response_details["handshake_status_code"] = error.status_code
-
- # Headers are directly available as e.headers
- if hasattr(error, 'headers') and error.headers:
- response_details["handshake_response_headers"] = dict(error.headers)
-
- # Some versions might have response_headers
- elif hasattr(error, 'response_headers') and error.response_headers:
- response_details["handshake_response_headers"] = dict(error.response_headers)
-
- # Handle InvalidHandshake exceptions (protocol-level failures)
- elif error.__class__.__name__ == 'InvalidHandshake':
- response_details["handshake_error_type"] = "InvalidHandshake"
- if hasattr(error, 'headers') and error.headers:
- response_details["handshake_response_headers"] = dict(error.headers)
-
- # Generic fallback for any exception with headers
- elif hasattr(error, 'headers') and error.headers:
- response_details["handshake_response_headers"] = dict(error.headers)
- elif hasattr(error, 'response_headers') and error.response_headers:
- response_details["handshake_response_headers"] = dict(error.response_headers)
-
- # Capture status code if available (for any exception type)
- if hasattr(error, 'status_code') and not response_details.get("handshake_status_code"):
- response_details["handshake_status_code"] = error.status_code
-
- except Exception:
- # Don't let header extraction fail the error handling
- pass
-
- if events:
- try:
- events.on_ws_error(
- url=str(uri),
- error=error,
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
- raise
-
- return instrumented_connect
-
-
-def _instrument_async_connect(original_connect, events: Union[SocketEvents, None] = None):
- """Wrap async websockets.connect to add telemetry."""
-
- @functools.wraps(original_connect)
- def instrumented_connect(uri, *args, extra_headers: Union[typing.Dict[str, str], None] = None, **kwargs):
- start_time = time.perf_counter()
-
- # Capture detailed request information including all connection parameters
- request_details = _capture_request_details(
- method="WS_CONNECT",
- url=str(uri),
- headers=extra_headers,
- function_name="websockets.client.connect",
- connection_args=args,
- connection_kwargs=kwargs,
- )
-
- # Emit connect event
- if events:
- try:
- events.on_ws_connect(
- url=str(uri),
- headers=extra_headers,
- request_details=request_details,
- )
- except Exception:
- pass
-
- # Return an async context manager
- @asynccontextmanager
- async def instrumented_context():
- try:
- # Call original connect
- async with original_connect(uri, *args, extra_headers=extra_headers, **kwargs) as connection:
- # Wrap the connection to capture close event
- if events:
- original_close = connection.close
-
- async def instrumented_close(*close_args, **close_kwargs):
- duration_ms = (time.perf_counter() - start_time) * 1000
- response_details = _capture_response_details(
- status_code=1000, # Normal close
- duration_ms=duration_ms
- )
-
- try:
- events.on_ws_close(
- url=str(uri),
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
-
- return await original_close(*close_args, **close_kwargs)
-
- connection.close = instrumented_close
-
- yield connection
-
- # Also emit close event when context exits (if connection wasn't manually closed)
- if events:
- try:
- duration_ms = (time.perf_counter() - start_time) * 1000
- response_details = _capture_response_details(
- status_code=1000, # Normal close
- duration_ms=duration_ms
- )
- events.on_ws_close(
- url=str(uri),
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
-
- except Exception as error:
- import traceback
-
- duration_ms = (time.perf_counter() - start_time) * 1000
-
- # Capture detailed error information
- response_details = _capture_response_details(
- error=error,
- duration_ms=duration_ms,
- error_type=type(error).__name__,
- error_message=str(error),
- stack_trace=traceback.format_exc(),
- function_name="websockets.client.connect",
- timeout_occurred="timeout" in str(error).lower() or "timed out" in str(error).lower(),
- )
-
- # Capture WebSocket handshake response headers if available
- try:
- # Handle InvalidStatusCode exceptions (handshake failures)
- if error.__class__.__name__ == 'InvalidStatusCode':
- # Status code is directly available
- if hasattr(error, 'status_code'):
- response_details["handshake_status_code"] = error.status_code
-
- # Headers are directly available as e.headers
- if hasattr(error, 'headers') and error.headers:
- response_details["handshake_response_headers"] = dict(error.headers)
-
- # Some versions might have response_headers
- elif hasattr(error, 'response_headers') and error.response_headers:
- response_details["handshake_response_headers"] = dict(error.response_headers)
-
- # Handle InvalidHandshake exceptions (protocol-level failures)
- elif error.__class__.__name__ == 'InvalidHandshake':
- response_details["handshake_error_type"] = "InvalidHandshake"
- if hasattr(error, 'headers') and error.headers:
- response_details["handshake_response_headers"] = dict(error.headers)
-
- # Generic fallback for any exception with headers
- elif hasattr(error, 'headers') and error.headers:
- response_details["handshake_response_headers"] = dict(error.headers)
- elif hasattr(error, 'response_headers') and error.response_headers:
- response_details["handshake_response_headers"] = dict(error.response_headers)
-
- # Capture status code if available (for any exception type)
- if hasattr(error, 'status_code') and not response_details.get("handshake_status_code"):
- response_details["handshake_status_code"] = error.status_code
-
- except Exception:
- # Don't let header extraction fail the error handling
- pass
-
- if events:
- try:
- events.on_ws_error(
- url=str(uri),
- error=error,
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
- raise
-
- return instrumented_context()
-
- return instrumented_connect
-
-
-def apply_websocket_instrumentation(socket_events: Union[SocketEvents, None] = None):
- """Apply WebSocket instrumentation globally using monkey-patching."""
- try:
- # Patch sync websockets
- if not hasattr(websockets_sync_client.connect, '_deepgram_instrumented'): # type: ignore[attr-defined]
- original_sync_connect = websockets_sync_client.connect
- websockets_sync_client.connect = _instrument_sync_connect(original_sync_connect, socket_events)
- websockets_sync_client.connect._deepgram_instrumented = True # type: ignore[attr-defined]
- except Exception:
- pass
-
- try:
- # Patch async websockets (legacy)
- try:
- from websockets.legacy.client import connect as legacy_connect
- if not hasattr(legacy_connect, '_deepgram_instrumented'): # type: ignore[attr-defined]
- instrumented_legacy = _instrument_async_connect(legacy_connect, socket_events)
-
- # Replace in the module
- import websockets.legacy.client as legacy_client
- legacy_client.connect = instrumented_legacy
- instrumented_legacy._deepgram_instrumented = True # type: ignore[attr-defined]
- except ImportError:
- pass
-
- # Patch async websockets (current)
- try:
- from websockets import connect as current_connect
- if not hasattr(current_connect, '_deepgram_instrumented'): # type: ignore[attr-defined]
- instrumented_current = _instrument_async_connect(current_connect, socket_events)
-
- # Replace in the module
- import websockets
- websockets.connect = instrumented_current
- instrumented_current._deepgram_instrumented = True # type: ignore[attr-defined]
- except ImportError:
- pass
-
- except Exception:
- pass
diff --git a/src/deepgram/extensions/core/telemetry_events.py b/src/deepgram/extensions/core/telemetry_events.py
deleted file mode 100644
index 9eaa8a87..00000000
--- a/src/deepgram/extensions/core/telemetry_events.py
+++ /dev/null
@@ -1,306 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, Mapping
-
-from ..telemetry.handler import TelemetryHandler
-from .instrumented_http import HttpEvents
-from .instrumented_socket import SocketEvents
-
-
-class TelemetryHttpEvents(HttpEvents):
- def __init__(self, handler: TelemetryHandler):
- self._handler = handler
-
- def on_http_request(
- self,
- *,
- method: str,
- url: str,
- headers: Mapping[str, str] | None,
- extras: Mapping[str, str] | None = None,
- request_details: Mapping[str, Any] | None = None,
- ) -> None:
- try:
- self._handler.on_http_request(
- method=method,
- url=url,
- headers=headers,
- extras=extras,
- request_details=request_details,
- )
- except Exception:
- pass
-
- def on_http_response(
- self,
- *,
- method: str,
- url: str,
- status_code: int,
- duration_ms: float,
- headers: Mapping[str, str] | None,
- extras: Mapping[str, str] | None = None,
- response_details: Mapping[str, Any] | None = None,
- ) -> None:
- try:
- self._handler.on_http_response(
- method=method,
- url=url,
- status_code=status_code,
- duration_ms=duration_ms,
- headers=headers,
- extras=extras,
- response_details=response_details,
- )
- except Exception:
- pass
-
- def on_http_error(
- self,
- *,
- method: str,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: Mapping[str, Any] | None = None,
- response_details: Mapping[str, Any] | None = None,
- ) -> None:
- try:
- self._handler.on_http_error(
- method=method,
- url=url,
- error=error,
- duration_ms=duration_ms,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
-
-
-class TelemetrySocketEvents(SocketEvents):
- """Implementation of WebSocket events that forwards to a telemetry handler."""
-
- def __init__(self, handler: TelemetryHandler):
- self._handler = handler
-
- def on_ws_connect(
- self,
- *,
- url: str,
- headers: Mapping[str, str] | None = None,
- extras: Mapping[str, str] | None = None,
- request_details: Mapping[str, Any] | None = None,
- ) -> None:
- try:
- self._handler.on_ws_connect(
- url=url,
- headers=headers,
- extras=extras,
- request_details=request_details,
- )
- except Exception:
- pass
-
- def on_ws_error(
- self,
- *,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: Mapping[str, Any] | None = None,
- response_details: Mapping[str, Any] | None = None,
- ) -> None:
- try:
- self._handler.on_ws_error(
- url=url,
- error=error,
- extras=None,
- request_details=request_details,
- response_details=response_details,
- )
- except Exception:
- pass
-
- def on_ws_close(
- self,
- *,
- url: str,
- duration_ms: float,
- request_details: Mapping[str, Any] | None = None,
- response_details: Mapping[str, Any] | None = None,
- ) -> None:
- try:
- self._handler.on_ws_close(
- url=url,
- )
- except Exception:
- pass
-
-
-def filter_sensitive_headers(headers: Mapping[str, str] | None) -> Dict[str, str] | None:
- """Filter out sensitive headers from telemetry, keeping all safe headers."""
- if not headers:
- return None
-
- # Headers to exclude from telemetry for security
- sensitive_prefixes = ('authorization', 'sec-', 'cookie', 'x-api-key', 'x-auth')
- sensitive_headers = {'authorization', 'cookie', 'set-cookie', 'x-api-key', 'x-auth-token', 'bearer'}
-
- filtered_headers = {}
- for key, value in headers.items():
- key_lower = key.lower()
-
- # Skip sensitive headers
- if key_lower in sensitive_headers:
- continue
- if any(key_lower.startswith(prefix) for prefix in sensitive_prefixes):
- continue
-
- filtered_headers[key] = str(value)
-
- return filtered_headers if filtered_headers else None
-
-
-def extract_deepgram_headers(headers: Mapping[str, str] | None) -> Dict[str, str] | None:
- """Extract x-dg-* headers from response headers."""
- if not headers:
- return None
-
- dg_headers = {}
- for key, value in headers.items():
- if key.lower().startswith('x-dg-'):
- dg_headers[key.lower()] = str(value)
-
- return dg_headers if dg_headers else None
-
-
-def capture_request_details(
- method: str | None = None,
- url: str | None = None,
- headers: Mapping[str, str] | None = None,
- params: Mapping[str, Any] | None = None,
- **kwargs
-) -> Dict[str, Any]:
- """Capture comprehensive request details for telemetry (keys only for privacy)."""
- details: Dict[str, Any] = {}
-
- if method:
- details['method'] = method
-
- # For URL, capture the structure but not query parameters with values
- if url:
- details['url_structure'] = _extract_url_structure(url)
-
- # For headers, capture only the keys (not values) for privacy
- if headers:
- details['header_keys'] = sorted(list(headers.keys()))
- details['header_count'] = len(headers)
-
- # For query parameters, capture only the keys (not values) for privacy
- if params:
- details['param_keys'] = sorted(list(params.keys()))
- details['param_count'] = len(params)
-
- # For body content, capture type information but not actual content
- if 'json' in kwargs and kwargs['json'] is not None:
- details['has_json_body'] = True
- details['json_body_type'] = type(kwargs['json']).__name__
-
- if 'data' in kwargs and kwargs['data'] is not None:
- details['has_data_body'] = True
- details['data_body_type'] = type(kwargs['data']).__name__
-
- if 'content' in kwargs and kwargs['content'] is not None:
- details['has_content_body'] = True
- details['content_body_type'] = type(kwargs['content']).__name__
-
- if 'files' in kwargs and kwargs['files'] is not None:
- details['has_files'] = True
- details['files_type'] = type(kwargs['files']).__name__
-
- # Capture any additional request context (excluding sensitive data)
- safe_kwargs = ['timeout', 'follow_redirects', 'max_redirects']
- for key in safe_kwargs:
- if key in kwargs and kwargs[key] is not None:
- details[key] = kwargs[key]
-
- return details
-
-
-def _extract_url_structure(url: str) -> Dict[str, Any]:
- """Extract URL structure without exposing sensitive query parameter values."""
- try:
- from urllib.parse import parse_qs, urlparse
-
- parsed = urlparse(url)
- structure: Dict[str, Any] = {
- 'scheme': parsed.scheme,
- 'hostname': parsed.hostname,
- 'port': parsed.port,
- 'path': parsed.path,
- }
-
- # For query string, only capture the parameter keys, not values
- if parsed.query:
- query_params = parse_qs(parsed.query, keep_blank_values=True)
- structure['query_param_keys'] = sorted(list(query_params.keys()))
- structure['query_param_count'] = len(query_params)
-
- return structure
- except Exception:
- # If URL parsing fails, just return a safe representation
- return {'url_parse_error': True, 'url_length': len(url)}
-
-
-def capture_response_details(response: Any = None, **kwargs) -> Dict[str, Any]:
- """Capture comprehensive response details for telemetry (keys only for privacy)."""
- details = {}
-
- if response is not None:
- # Try to extract common response attributes
- try:
- if hasattr(response, 'status_code'):
- details['status_code'] = response.status_code
- if hasattr(response, 'headers'):
- # For response headers, capture only keys (not values) for privacy
- headers = response.headers
- details['response_header_keys'] = sorted(list(headers.keys()))
- details['response_header_count'] = len(headers)
-
- # Extract request_id for server-side correlation (this is safe to log)
- request_id = (headers.get('x-request-id') or
- headers.get('X-Request-Id') or
- headers.get('x-dg-request-id') or
- headers.get('X-DG-Request-Id') or
- headers.get('request-id') or
- headers.get('Request-Id'))
- if request_id:
- details['request_id'] = request_id
-
- if hasattr(response, 'reason_phrase'):
- details['reason_phrase'] = response.reason_phrase
- if hasattr(response, 'url'):
- # For response URL, capture structure but not full URL
- details['response_url_structure'] = _extract_url_structure(str(response.url))
- except Exception:
- pass
-
- # Capture any additional response context (excluding sensitive data)
- safe_kwargs = ['duration_ms', 'error', 'error_type', 'error_message', 'stack_trace',
- 'timeout_occurred', 'function_name']
- for key in safe_kwargs:
- if key in kwargs and kwargs[key] is not None:
- details[key] = kwargs[key]
-
- # Also capture any other non-sensitive context
- for key, value in kwargs.items():
- if (key not in safe_kwargs and
- value is not None and
- key not in ['headers', 'params', 'json', 'data', 'content']): # Exclude potentially sensitive data
- details[key] = value
-
- return details
-
-
-
diff --git a/src/deepgram/extensions/telemetry/__init__.py b/src/deepgram/extensions/telemetry/__init__.py
deleted file mode 100644
index a0fd2921..00000000
--- a/src/deepgram/extensions/telemetry/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""Deepgram telemetry package.
-
-Provides batching telemetry handler, HTTP instrumentation, and protobuf encoding utilities.
-"""
-
-__all__ = [
- "batching_handler",
- "handler",
- "instrumented_http",
- "proto_encoder",
-]
-
-
diff --git a/src/deepgram/extensions/telemetry/batching_handler.py b/src/deepgram/extensions/telemetry/batching_handler.py
deleted file mode 100644
index 00cdc4db..00000000
--- a/src/deepgram/extensions/telemetry/batching_handler.py
+++ /dev/null
@@ -1,658 +0,0 @@
-from __future__ import annotations
-
-import atexit
-import base64
-import os
-import queue
-import threading
-import time
-import traceback
-import typing
-import zlib
-from collections import Counter
-from typing import List
-
-import httpx
-from .handler import TelemetryHandler
-
-
-class BatchingTelemetryHandler(TelemetryHandler):
- """
- Non-blocking telemetry handler that batches events and flushes in the background.
-
- - Enqueues events quickly; never blocks request path
- - Flushes when batch size or max interval is reached
- - Errors trigger an immediate flush attempt
- - Best-effort delivery; drops on full queue rather than blocking
- """
-
- def __init__(
- self,
- *,
- endpoint: str,
- api_key: str | None = None,
- batch_size: int = 20,
- max_interval_seconds: float = 5.0,
- max_queue_size: int = 1000,
- client: typing.Optional[httpx.Client] = None,
- encode_batch: typing.Optional[typing.Callable[..., bytes]] = None,
- encode_batch_iter: typing.Optional[typing.Callable[..., typing.Iterator[bytes]]] = None,
- content_type: str = "application/x-protobuf",
- context_provider: typing.Optional[typing.Callable[[], typing.Mapping[str, typing.Any]]] = None,
- max_consecutive_failures: int = 5,
- synchronous: bool = False,
- ) -> None:
- self._endpoint = endpoint
- self._api_key = api_key
- self._batch_size = max(1, batch_size)
- self._max_interval = max(0.25, max_interval_seconds)
- self._client = client or httpx.Client(timeout=5.0)
- self._encode_batch = encode_batch
- self._encode_batch_iter = encode_batch_iter
- # Always protobuf by default
- self._content_type = content_type
- self._context_provider = context_provider or (lambda: {})
- self._debug = str(os.getenv("DEEPGRAM_DEBUG", "")).lower() in ("1", "true")
- self._max_consecutive_failures = max(1, max_consecutive_failures)
- self._consecutive_failures = 0
- self._disabled = False
- self._synchronous = bool(synchronous)
- if self._synchronous:
- # In synchronous mode, we do not spin a worker; we stage events locally
- self._buffer_sync: List[dict] = []
- else:
- self._queue: queue.Queue[dict] = queue.Queue(maxsize=max_queue_size)
- self._stop_event = threading.Event()
- self._flush_event = threading.Event()
- self._worker = threading.Thread(target=self._run, name="dg-telemetry-worker", daemon=True)
- self._worker.start()
- # Ensure we flush at process exit so short-lived scripts still send (or surface errors in debug)
- atexit.register(self.close)
-
- # --- TelemetryHandler interface ---
-
- def on_http_request(
- self,
- *,
- method: str,
- url: str,
- headers: typing.Mapping[str, str] | None,
- extras: typing.Mapping[str, str] | None = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- event = {
- "type": "http_request",
- "ts": time.time(),
- "method": method,
- "url": url,
- }
- # Extract request_id from request_details for server-side correlation
- if request_details and "request_id" in request_details:
- event["request_id"] = request_details["request_id"]
- if extras:
- event["extras"] = dict(extras)
- if request_details:
- event["request_details"] = dict(request_details)
- self._enqueue(event)
-
- def on_http_response(
- self,
- *,
- method: str,
- url: str,
- status_code: int,
- duration_ms: float,
- headers: typing.Mapping[str, str] | None,
- extras: typing.Mapping[str, str] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- event = {
- "type": "http_response",
- "ts": time.time(),
- "method": method,
- "url": url,
- "status_code": status_code,
- "duration_ms": duration_ms,
- }
- # Extract request_id from response_details for server-side correlation
- if response_details and "request_id" in response_details:
- event["request_id"] = response_details["request_id"]
- if extras:
- event["extras"] = dict(extras)
- if response_details:
- event["response_details"] = dict(response_details)
- self._enqueue(event)
- # Only promote 5XX server errors to ErrorEvent (not 4XX client errors)
- try:
- if int(status_code) >= 500:
- self._enqueue({
- "type": "http_error",
- "ts": time.time(),
- "method": method,
- "url": url,
- "error": f"HTTP_{status_code}",
- "status_code": status_code,
- "handled": True,
- }, force_flush=True)
- except Exception:
- pass
-
- def on_http_error(
- self,
- *,
- method: str,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- # Filter out 4XX client errors - only capture 5XX server errors and unhandled exceptions
- if response_details:
- status_code = response_details.get('status_code')
- if status_code and isinstance(status_code, int) and 400 <= status_code < 500:
- # Skip 4XX client errors (auth failures, bad requests, etc.)
- return
-
- stack: str = ""
- try:
- stack = "".join(traceback.format_exception(type(error), error, getattr(error, "__traceback__", None)))
- except Exception:
- pass
-
- event = {
- "type": "http_error",
- "ts": time.time(),
- "method": method,
- "url": url,
- "error": type(error).__name__,
- "message": str(error),
- "stack_trace": stack,
- "handled": False,
- "duration_ms": duration_ms,
- }
-
- # Extract request_id for server-side correlation
- if response_details and "request_id" in response_details:
- event["request_id"] = response_details["request_id"]
- elif request_details and "request_id" in request_details:
- event["request_id"] = request_details["request_id"]
-
- # Add comprehensive error context
- if request_details:
- event["request_details"] = dict(request_details)
- if response_details:
- event["response_details"] = dict(response_details)
-
- self._enqueue(event, force_flush=True)
-
- # --- Optional WebSocket signals -> mapped to telemetry ---
- def on_ws_connect(
- self,
- *,
- url: str,
- headers: typing.Mapping[str, str] | None,
- extras: typing.Mapping[str, str] | None = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- event = {
- "type": "ws_connect",
- "ts": time.time(),
- "url": url,
- }
- if extras:
- event["extras"] = dict(extras)
- if request_details:
- event["request_details"] = dict(request_details)
- self._enqueue(event)
-
-
- def on_ws_error(
- self,
- *,
- url: str,
- error: BaseException,
- extras: typing.Mapping[str, str] | None = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- # Use stack trace from response_details if available, otherwise generate it
- stack: str = ""
- if response_details and response_details.get("stack_trace"):
- stack = response_details["stack_trace"]
- else:
- try:
- stack = "".join(traceback.format_exception(type(error), error, getattr(error, "__traceback__", None)))
- except Exception:
- pass
-
- event = {
- "type": "ws_error",
- "ts": time.time(),
- "url": url,
- "error": type(error).__name__,
- "message": str(error),
- "stack_trace": stack,
- "handled": False,
- }
-
- # Add comprehensive error context from response_details
- if response_details:
- event["response_details"] = dict(response_details)
- # Extract specific error details to event level for easier access
- if "error_type" in response_details:
- event["error_type"] = response_details["error_type"]
- if "error_message" in response_details:
- event["error_message"] = response_details["error_message"]
- if "function_name" in response_details:
- event["function_name"] = response_details["function_name"]
- if "duration_ms" in response_details:
- event["duration_ms"] = response_details["duration_ms"]
- if "timeout_occurred" in response_details:
- event["timeout_occurred"] = response_details["timeout_occurred"]
- if "handshake_response_headers" in response_details:
- event["handshake_response_headers"] = response_details["handshake_response_headers"]
- if "handshake_status_code" in response_details:
- event["handshake_status_code"] = response_details["handshake_status_code"]
- if "handshake_reason_phrase" in response_details:
- event["handshake_reason_phrase"] = response_details["handshake_reason_phrase"]
- if "handshake_error_type" in response_details:
- event["handshake_error_type"] = response_details["handshake_error_type"]
-
- # Add request context
- if request_details:
- event["request_details"] = dict(request_details)
- # Extract specific request details for easier access
- if "function_name" in request_details:
- event["sdk_function"] = request_details["function_name"]
- if "connection_kwargs" in request_details:
- event["connection_params"] = request_details["connection_kwargs"]
-
- # Build comprehensive extras with all enhanced telemetry details
- enhanced_extras = dict(extras) if extras else {}
-
- # Add all response details to extras
- if response_details:
- for key, value in response_details.items():
- if key not in enhanced_extras and value is not None:
- enhanced_extras[key] = value
-
- # Add all request details to extras
- if request_details:
- for key, value in request_details.items():
- if key not in enhanced_extras and value is not None:
- enhanced_extras[key] = value
-
- # Add all event-level details to extras
- event_extras = {
- "error_type": event.get("error_type"),
- "error_message": event.get("error_message"),
- "function_name": event.get("function_name"),
- "sdk_function": event.get("sdk_function"),
- "duration_ms": event.get("duration_ms"),
- "timeout_occurred": event.get("timeout_occurred"),
- "handshake_status_code": event.get("handshake_status_code"),
- "handshake_reason_phrase": event.get("handshake_reason_phrase"),
- "handshake_error_type": event.get("handshake_error_type"),
- "connection_params": event.get("connection_params"),
- }
-
- # Add handshake response headers to extras
- handshake_headers = event.get("handshake_response_headers")
- if handshake_headers and hasattr(handshake_headers, 'items'):
- for header_name, header_value in handshake_headers.items(): # type: ignore[attr-defined]
- safe_header_name = header_name.lower().replace('-', '_')
- enhanced_extras[f"handshake_{safe_header_name}"] = str(header_value)
-
- # Merge event extras, excluding None values
- for key, value in event_extras.items():
- if value is not None:
- enhanced_extras[key] = value
-
- # Store the comprehensive extras
- if enhanced_extras:
- event["extras"] = enhanced_extras
-
- self._enqueue(event, force_flush=True)
-
- def on_ws_close(
- self,
- *,
- url: str,
- ) -> None:
- # Close should force a final flush so debug printing happens during short-lived runs
- event = {
- "type": "ws_close",
- "ts": time.time(),
- "url": url,
- }
- self._enqueue(event, force_flush=True)
-
- # Optional: uncaught errors from external hooks
- def on_uncaught_error(self, *, error: BaseException) -> None:
- stack: str = ""
- try:
- stack = "".join(traceback.format_exception(type(error), error, getattr(error, "__traceback__", None)))
- except Exception:
- pass
- self._enqueue({
- "type": "uncaught_error",
- "ts": time.time(),
- "error": type(error).__name__,
- "message": str(error),
- "stack_trace": stack,
- "handled": False,
- }, force_flush=True)
-
- # --- Internal batching ---
-
- def _enqueue(self, event: dict, *, force_flush: bool = False) -> None:
- if self._disabled:
- return
- if self._synchronous:
- # Stage locally and flush according to thresholds immediately in caller thread
- self._buffer_sync.append(event) # type: ignore[attr-defined]
- if len(self._buffer_sync) >= self._batch_size or force_flush: # type: ignore[attr-defined]
- try:
- self._flush(self._buffer_sync) # type: ignore[attr-defined]
- finally:
- self._buffer_sync = [] # type: ignore[attr-defined]
- return
- try:
- self._queue.put_nowait(event)
- except queue.Full:
- # Best-effort: drop rather than blocking request path
- return
- # Wake worker if we hit batch size or need immediate flush
- if self._queue.qsize() >= self._batch_size or force_flush:
- self._flush_event.set()
-
- def _run(self) -> None:
- last_flush = time.time()
- buffer: List[dict] = []
- while not self._stop_event.is_set():
- if self._disabled:
- break
- timeout = max(0.0, self._max_interval - (time.time() - last_flush))
- try:
- item = self._queue.get(timeout=timeout)
- buffer.append(item)
- except queue.Empty:
- pass
-
- # Conditions to flush: batch size, interval elapsed, or explicit signal
- should_flush = (
- len(buffer) >= self._batch_size
- or (time.time() - last_flush) >= self._max_interval
- or self._flush_event.is_set()
- )
- if should_flush and buffer:
- self._flush(buffer)
- buffer = []
- last_flush = time.time()
- self._flush_event.clear()
-
- # Drain on shutdown
- if buffer:
- self._flush(buffer)
-
- def _flush(self, batch: List[dict]) -> None:
- try:
- # Choose streaming iterator if provided; otherwise bytes encoder.
- # If no encoder provided, drop silently to avoid memory use.
- context = self._context_provider() or {}
-
- # Extract enhanced telemetry details from events and add to context extras
- enhanced_extras = {}
- for event in batch:
- # Merge event extras
- event_extras = event.get("extras", {})
- if event_extras:
- for key, value in event_extras.items():
- if value is not None:
- enhanced_extras[key] = value
-
- # Merge request_details (privacy-focused request structure)
- request_details = event.get("request_details", {})
- if request_details:
- for key, value in request_details.items():
- if value is not None:
- enhanced_extras[f"request_{key}"] = value
-
- # Merge response_details (privacy-focused response structure)
- response_details = event.get("response_details", {})
- if response_details:
- for key, value in response_details.items():
- if value is not None:
- enhanced_extras[f"response_{key}"] = value
-
- # Add enhanced extras to context
- if enhanced_extras:
- context = dict(context) # Make a copy
- context["extras"] = enhanced_extras
- if self._encode_batch_iter is not None:
- try:
- plain_iter = self._encode_batch_iter(batch, context) # type: ignore[misc]
- except TypeError:
- plain_iter = self._encode_batch_iter(batch) # type: ignore[misc]
- elif self._encode_batch is not None:
- try:
- data = self._encode_batch(batch, context) # type: ignore[misc]
- except TypeError:
- data = self._encode_batch(batch) # type: ignore[misc]
- plain_iter = iter([data])
- else:
- # Use built-in protobuf encoder when none provided
- from .proto_encoder import encode_telemetry_batch_iter
-
- try:
- plain_iter = encode_telemetry_batch_iter(batch, context)
- except Exception:
- if self._debug:
- raise
- return
-
- headers = {"content-type": self._content_type, "content-encoding": "gzip"}
- if self._api_key:
- headers["authorization"] = f"Bearer {self._api_key}"
- if self._debug:
- # Mask sensitive headers for debug output
- dbg_headers = dict(headers)
- if "authorization" in dbg_headers:
- dbg_headers["authorization"] = "Bearer ***"
- # Summarize event types and include a compact context view
- try:
- type_counts = dict(Counter(str(e.get("type", "unknown")) for e in batch))
- except Exception:
- type_counts = {}
- ctx_view = {}
- try:
- # Show a stable subset of context keys if present
- for k in (
- "sdk_name",
- "sdk_version",
- "language",
- "runtime_version",
- "os",
- "arch",
- "session_id",
- "app_name",
- "app_version",
- "environment",
- "project_id",
- ):
- v = context.get(k)
- if v:
- ctx_view[k] = v
- except Exception:
- pass
- # Compute full bodies in debug mode to print exact payload
- # Determine uncompressed bytes for the batch
- try:
- if self._encode_batch_iter is not None:
- raw_body = b"".join(plain_iter)
- elif self._encode_batch is not None:
- # "data" exists from above branch
- raw_body = data
- else:
- from .proto_encoder import encode_telemetry_batch
-
- raw_body = encode_telemetry_batch(batch, context)
- except Exception:
- raw_body = b""
- # Gzip-compress to match actual wire payload
- try:
- compressor = zlib.compressobj(wbits=31)
- compressed_body = compressor.compress(raw_body) + compressor.flush()
- except Exception:
- compressed_body = b""
- # Print full payload (compressed, base64) and sizes
- print(
- f"[deepgram][telemetry] POST {self._endpoint} "
- f"events={len(batch)} headers={dbg_headers} types={type_counts} context={ctx_view}"
- )
- try:
- b64 = base64.b64encode(compressed_body).decode("ascii") if compressed_body else ""
- except Exception:
- b64 = ""
- print(
- f"[deepgram][telemetry] body.compressed.b64={b64} "
- f"size={len(compressed_body)}B raw={len(raw_body)}B"
- )
- try:
- if self._debug:
- # Send pre-built compressed body in debug mode
- resp = self._client.post(self._endpoint, content=compressed_body, headers=headers)
- else:
- # Stream in normal mode
- resp = self._client.post(self._endpoint, content=self._gzip_iter(plain_iter), headers=headers)
- if self._debug:
- try:
- status = getattr(resp, "status_code", "unknown")
- print(f"[deepgram][telemetry] -> {status}")
- except Exception:
- pass
- except Exception as exc:
- # Log the error in debug mode instead of raising from a worker thread
- if self._debug:
- try:
- print(f"[deepgram][telemetry] -> error: {type(exc).__name__}: {exc}")
- except Exception:
- pass
- # Re-raise to outer handler to count failure/disable logic
- raise
- # Success: reset failure count
- self._consecutive_failures = 0
- except Exception:
- # Swallow errors; telemetry is best-effort
- self._consecutive_failures += 1
- if self._consecutive_failures >= self._max_consecutive_failures:
- self._disable()
-
- def close(self) -> None:
- if self._debug:
- print("[deepgram][telemetry] close() called")
-
- if self._synchronous:
- # Flush any staged events synchronously
- buf = getattr(self, "_buffer_sync", [])
- if buf:
- if self._debug:
- print(f"[deepgram][telemetry] flushing {len(buf)} staged events on close")
- try:
- self._flush(buf)
- finally:
- self._buffer_sync = [] # type: ignore[attr-defined]
- elif self._debug:
- print("[deepgram][telemetry] no staged events to flush")
- try:
- self._client.close()
- except Exception:
- pass
- return
- # First, try to flush any pending events
- if self._debug:
- print(f"[deepgram][telemetry] flushing pending events, queue size: {self._queue.qsize()}")
- try:
- self.flush()
- except Exception:
- if self._debug:
- raise
-
- self._stop_event.set()
- # Drain any remaining events synchronously to ensure a final flush
- drain: List[dict] = []
- try:
- while True:
- drain.append(self._queue.get_nowait())
- except queue.Empty:
- pass
- if drain:
- if self._debug:
- print(f"[deepgram][telemetry] draining {len(drain)} remaining events on close")
- try:
- self._flush(drain)
- except Exception:
- if self._debug:
- raise
- elif self._debug:
- print("[deepgram][telemetry] no remaining events to drain")
- # Give the worker a moment to exit cleanly
- self._worker.join(timeout=1.0)
- try:
- self._client.close()
- except Exception:
- pass
-
- def flush(self) -> None:
- """
- Force a synchronous flush of any staged or queued events.
-
- - In synchronous mode, this flushes the local buffer immediately.
- - In background mode, this drains the queue and flushes in the caller thread.
- Note: this does not capture items already pulled into the worker's internal buffer.
- """
- if self._disabled:
- return
- if self._synchronous:
- buf = getattr(self, "_buffer_sync", [])
- if buf:
- try:
- self._flush(buf)
- finally:
- self._buffer_sync = [] # type: ignore[attr-defined]
- return
- drain: List[dict] = []
- try:
- while True:
- drain.append(self._queue.get_nowait())
- except queue.Empty:
- pass
- if drain:
- self._flush(drain)
-
- @staticmethod
- def _gzip_iter(data_iter: typing.Iterator[bytes]) -> typing.Iterator[bytes]:
- compressor = zlib.compressobj(wbits=31)
- for chunk in data_iter:
- if not isinstance(chunk, (bytes, bytearray)):
- chunk = bytes(chunk)
- if chunk:
- out = compressor.compress(chunk)
- if out:
- yield out
- tail = compressor.flush()
- if tail:
- yield tail
-
- def _disable(self) -> None:
- # Toggle off for this session: drop all future events, stop worker, clear queue fast
- self._disabled = True
- try:
- while True:
- self._queue.get_nowait()
- except queue.Empty:
- pass
- self._stop_event.set()
-
-
diff --git a/src/deepgram/extensions/telemetry/handler.py b/src/deepgram/extensions/telemetry/handler.py
deleted file mode 100644
index 362e0497..00000000
--- a/src/deepgram/extensions/telemetry/handler.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-import typing
-from datetime import datetime
-
-from .models import ErrorEvent, ErrorSeverity, TelemetryEvent
-
-
-class TelemetryHandler:
- """
- Interface for SDK telemetry. Users can supply a custom implementation.
- All methods are optional to implement.
- """
-
- def on_http_request(
- self,
- *,
- method: str,
- url: str,
- headers: typing.Mapping[str, str] | None,
- extras: typing.Mapping[str, str] | None = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- pass
-
- def on_http_response(
- self,
- *,
- method: str,
- url: str,
- status_code: int,
- duration_ms: float,
- headers: typing.Mapping[str, str] | None,
- extras: typing.Mapping[str, str] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- pass
-
- def on_http_error(
- self,
- *,
- method: str,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- pass
-
- # WebSocket telemetry methods
- def on_ws_connect(
- self,
- *,
- url: str,
- headers: typing.Mapping[str, str] | None,
- extras: typing.Mapping[str, str] | None = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- pass
-
- def on_ws_error(
- self,
- *,
- url: str,
- error: BaseException,
- extras: typing.Mapping[str, str] | None = None,
- request_details: typing.Mapping[str, typing.Any] | None = None,
- response_details: typing.Mapping[str, typing.Any] | None = None,
- ) -> None:
- pass
-
- def on_ws_close(
- self,
- *,
- url: str,
- ) -> None:
- pass
-
- # Optional: global uncaught errors from sys/threading hooks
- def on_uncaught_error(self, *, error: BaseException) -> None:
- pass
-
-
-class NoOpTelemetryHandler(TelemetryHandler):
- pass
-
-
diff --git a/src/deepgram/extensions/telemetry/models.py b/src/deepgram/extensions/telemetry/models.py
deleted file mode 100644
index 85790649..00000000
--- a/src/deepgram/extensions/telemetry/models.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""
-Generated Pydantic models from telemetry.proto
-Auto-generated - do not edit manually
-"""
-
-from __future__ import annotations
-
-import typing
-from datetime import datetime
-from enum import Enum
-
-import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class ErrorSeverity(str, Enum):
- """Error severity level enum."""
- UNSPECIFIED = "ERROR_SEVERITY_UNSPECIFIED"
- INFO = "ERROR_SEVERITY_INFO"
- WARNING = "ERROR_SEVERITY_WARNING"
- ERROR = "ERROR_SEVERITY_ERROR"
- CRITICAL = "ERROR_SEVERITY_CRITICAL"
-
-
-class TelemetryContext(UniversalBaseModel):
- """
- Represents common context about the SDK/CLI and environment producing telemetry.
- """
-
- package_name: typing.Optional[str] = None
- """e.g., "node-sdk", "python-sdk", "cli" """
-
- package_version: typing.Optional[str] = None
- """e.g., "3.2.1" """
-
- language: typing.Optional[str] = None
- """e.g., "node", "python", "go" """
-
- runtime_version: typing.Optional[str] = None
- """e.g., "node 20.11.1", "python 3.11.6" """
-
- os: typing.Optional[str] = None
- """e.g., "darwin", "linux", "windows" """
-
- arch: typing.Optional[str] = None
- """e.g., "arm64", "amd64" """
-
- app_name: typing.Optional[str] = None
- """host application name (if known) """
-
- app_version: typing.Optional[str] = None
- """host application version (if known) """
-
- environment: typing.Optional[str] = None
- """e.g., "prod", "staging", "dev" """
-
- session_id: typing.Optional[str] = None
- """client session identifier """
-
- installation_id: typing.Optional[str] = None
- """stable machine/install identifier when available """
-
- project_id: typing.Optional[str] = None
- """project/workspace identifier if applicable """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class TelemetryEvent(UniversalBaseModel):
- """
- Telemetry event payload carrying arbitrary attributes and metrics.
- """
-
- name: str
- """event name, e.g., "request.start" """
-
- time: datetime
- """event timestamp (UTC) """
-
- attributes: typing.Optional[typing.Dict[str, str]] = None
- """string attributes (tags) """
-
- metrics: typing.Optional[typing.Dict[str, float]] = None
- """numeric metrics """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ErrorEvent(UniversalBaseModel):
- """
- Structured error event.
- """
-
- type: typing.Optional[str] = None
- """error type/class, e.g., "TypeError" """
-
- message: typing.Optional[str] = None
- """error message """
-
- stack_trace: typing.Optional[str] = None
- """stack trace if available """
-
- file: typing.Optional[str] = None
- """source file (if known) """
-
- line: typing.Optional[int] = None
- """source line number """
-
- column: typing.Optional[int] = None
- """source column number """
-
- severity: ErrorSeverity = ErrorSeverity.UNSPECIFIED
- """severity level """
-
- handled: bool = False
- """whether the error was handled """
-
- time: datetime
- """error timestamp (UTC) """
-
- attributes: typing.Optional[typing.Dict[str, str]] = None
- """additional context as key/value """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class Record(UniversalBaseModel):
- """
- A single record may be telemetry or error.
- """
-
- telemetry: typing.Optional[TelemetryEvent] = None
- error: typing.Optional[ErrorEvent] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class TelemetryBatch(UniversalBaseModel):
- """
- Batch payload sent to the ingestion endpoint.
- The entire batch may be gzip-compressed; server accepts raw or gzip.
- """
-
- context: TelemetryContext
- """shared context for the batch """
-
- records: typing.List[Record]
- """telemetry and error records """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/telemetry/proto_encoder.py b/src/deepgram/extensions/telemetry/proto_encoder.py
deleted file mode 100644
index a085ed0e..00000000
--- a/src/deepgram/extensions/telemetry/proto_encoder.py
+++ /dev/null
@@ -1,379 +0,0 @@
-from __future__ import annotations
-# isort: skip_file
-
-import struct
-import time
-import typing
-from typing import Dict, List
-
-
-# --- Protobuf wire helpers (proto3) ---
-
-def _varint(value: int) -> bytes:
- if value < 0:
- # For this usage we only encode non-negative values
- value &= (1 << 64) - 1
- out = bytearray()
- while value > 0x7F:
- out.append((value & 0x7F) | 0x80)
- value >>= 7
- out.append(value)
- return bytes(out)
-
-
-def _key(field_number: int, wire_type: int) -> bytes:
- return _varint((field_number << 3) | wire_type)
-
-
-def _len_delimited(field_number: int, payload: bytes) -> bytes:
- return _key(field_number, 2) + _varint(len(payload)) + payload
-
-
-def _string(field_number: int, value: str) -> bytes:
- data = value.encode("utf-8")
- return _len_delimited(field_number, data)
-
-
-def _bool(field_number: int, value: bool) -> bytes:
- return _key(field_number, 0) + _varint(1 if value else 0)
-
-
-def _int64(field_number: int, value: int) -> bytes:
- return _key(field_number, 0) + _varint(value)
-
-
-def _double(field_number: int, value: float) -> bytes:
- return _key(field_number, 1) + struct.pack(" bytes:
- sec = int(ts_seconds)
- nanos = int(round((ts_seconds - sec) * 1_000_000_000))
- if nanos >= 1_000_000_000:
- sec += 1
- nanos -= 1_000_000_000
- msg = bytearray()
- msg += _int64(1, sec)
- if nanos:
- msg += _key(2, 0) + _varint(nanos)
- return bytes(msg)
-
-
-# Map encoders: map and map
-def _map_str_str(field_number: int, items: typing.Mapping[str, str] | None) -> bytes:
- if not items:
- return b""
- out = bytearray()
- for k, v in items.items():
- entry = _string(1, k) + _string(2, v)
- out += _len_delimited(field_number, entry)
- return bytes(out)
-
-
-def _map_str_double(field_number: int, items: typing.Mapping[str, float] | None) -> bytes:
- if not items:
- return b""
- out = bytearray()
- for k, v in items.items():
- entry = _string(1, k) + _double(2, float(v))
- out += _len_delimited(field_number, entry)
- return bytes(out)
-
-
-# --- Schema-specific encoders (deepgram.dxtelemetry.v1) ---
-
-def _encode_telemetry_context(ctx: typing.Mapping[str, typing.Any]) -> bytes:
- # Map SDK context keys to proto fields
- package_name = ctx.get("sdk_name") or ctx.get("package_name") or "python-sdk"
- package_version = ctx.get("sdk_version") or ctx.get("package_version") or ""
- language = ctx.get("language") or "python"
- runtime_version = ctx.get("runtime_version") or ""
- os_name = ctx.get("os") or ""
- arch = ctx.get("arch") or ""
- app_name = ctx.get("app_name") or ""
- app_version = ctx.get("app_version") or ""
- environment = ctx.get("environment") or ""
- session_id = ctx.get("session_id") or ""
- installation_id = ctx.get("installation_id") or ""
- project_id = ctx.get("project_id") or ""
-
- msg = bytearray()
- if package_name:
- msg += _string(1, package_name)
- if package_version:
- msg += _string(2, package_version)
- if language:
- msg += _string(3, language)
- if runtime_version:
- msg += _string(4, runtime_version)
- if os_name:
- msg += _string(5, os_name)
- if arch:
- msg += _string(6, arch)
- if app_name:
- msg += _string(7, app_name)
- if app_version:
- msg += _string(8, app_version)
- if environment:
- msg += _string(9, environment)
- if session_id:
- msg += _string(10, session_id)
- if installation_id:
- msg += _string(11, installation_id)
- if project_id:
- msg += _string(12, project_id)
-
- # Include extras as additional context attributes (field 13)
- extras = ctx.get("extras", {})
- if extras:
- # Convert extras to string-string map for protobuf
- extras_map = {}
- for key, value in extras.items():
- if value is not None:
- extras_map[str(key)] = str(value)
- msg += _map_str_str(13, extras_map)
-
- return bytes(msg)
-
-
-def _encode_telemetry_event(name: str, ts: float, attributes: Dict[str, str] | None, metrics: Dict[str, float] | None) -> bytes:
- msg = bytearray()
- msg += _string(1, name)
- msg += _len_delimited(2, _timestamp_message(ts))
- msg += _map_str_str(3, attributes)
- msg += _map_str_double(4, metrics)
- return bytes(msg)
-
-
-# ErrorSeverity enum values: ... INFO=1, WARNING=2, ERROR=3, CRITICAL=4
-def _encode_error_event(
- *,
- err_type: str,
- message: str,
- severity: int,
- handled: bool,
- ts: float,
- attributes: Dict[str, str] | None,
- stack_trace: str | None = None,
- file: str | None = None,
- line: int | None = None,
- column: int | None = None,
-) -> bytes:
- msg = bytearray()
- if err_type:
- msg += _string(1, err_type)
- if message:
- msg += _string(2, message)
- if stack_trace:
- msg += _string(3, stack_trace)
- if file:
- msg += _string(4, file)
- if line is not None:
- msg += _key(5, 0) + _varint(line)
- if column is not None:
- msg += _key(6, 0) + _varint(column)
- msg += _key(7, 0) + _varint(severity)
- msg += _bool(8, handled)
- msg += _len_delimited(9, _timestamp_message(ts))
- msg += _map_str_str(10, attributes)
- return bytes(msg)
-
-
-def _encode_record(record: bytes, kind_field_number: int) -> bytes:
- # kind_field_number: 1 for telemetry, 2 for error
- return _len_delimited(2, _len_delimited(kind_field_number, record))
-
-
-def _normalize_events(events: List[dict]) -> List[bytes]:
- out: List[bytes] = []
- for e in events:
- etype = e.get("type")
- ts = float(e.get("ts", time.time()))
- if etype == "http_request":
- attrs = {
- "method": str(e.get("method", "")),
- # Note: URL is never logged for privacy
- }
- # Add request_id if present in headers for server-side correlation
- request_id = e.get("request_id")
- if request_id:
- attrs["request_id"] = str(request_id)
- rec = _encode_telemetry_event("http.request", ts, attrs, None)
- out.append(_encode_record(rec, 1))
- elif etype == "http_response":
- attrs = {
- "method": str(e.get("method", "")),
- "status_code": str(e.get("status_code", "")),
- # Note: URL is never logged for privacy
- }
- # Add request_id if present in headers for server-side correlation
- request_id = e.get("request_id")
- if request_id:
- attrs["request_id"] = str(request_id)
- metrics = {"duration_ms": float(e.get("duration_ms", 0.0))}
- rec = _encode_telemetry_event("http.response", ts, attrs, metrics)
- out.append(_encode_record(rec, 1))
- elif etype == "http_error":
- attrs = {
- "method": str(e.get("method", "")),
- # Note: URL is never logged for privacy
- }
- # Include status_code if present
- sc = e.get("status_code")
- if sc is not None:
- attrs["status_code"] = str(sc)
- # Add request_id if present in headers for server-side correlation
- request_id = e.get("request_id")
- if request_id:
- attrs["request_id"] = str(request_id)
- rec = _encode_error_event(
- err_type=str(e.get("error", "Error")),
- message=str(e.get("message", "")),
- severity=3,
- handled=bool(e.get("handled", True)),
- ts=ts,
- attributes=attrs,
- stack_trace=str(e.get("stack_trace", "")) or None,
- )
- out.append(_encode_record(rec, 2))
- elif etype == "ws_connect":
- attrs = {
- # Note: URL is never logged for privacy
- "connection_type": "websocket",
- }
- # Add request_id if present for server-side correlation
- request_id = e.get("request_id")
- if request_id:
- attrs["request_id"] = str(request_id)
- rec = _encode_telemetry_event("ws.connect", ts, attrs, None)
- out.append(_encode_record(rec, 1))
- elif etype == "ws_error":
- attrs = {
- # Note: URL is never logged for privacy
- "connection_type": "websocket",
- }
-
- # Add detailed error information to attributes
- if e.get("error_type"):
- attrs["error_type"] = str(e["error_type"])
- if e.get("function_name"):
- attrs["function_name"] = str(e["function_name"])
- if e.get("sdk_function"):
- attrs["sdk_function"] = str(e["sdk_function"])
- if e.get("timeout_occurred"):
- attrs["timeout_occurred"] = str(e["timeout_occurred"])
- if e.get("duration_ms"):
- attrs["duration_ms"] = str(e["duration_ms"])
-
- # Add WebSocket handshake failure details
- if e.get("handshake_status_code"):
- attrs["handshake_status_code"] = str(e["handshake_status_code"])
- if e.get("handshake_reason_phrase"):
- attrs["handshake_reason_phrase"] = str(e["handshake_reason_phrase"])
- if e.get("handshake_error_type"):
- attrs["handshake_error_type"] = str(e["handshake_error_type"])
- if e.get("handshake_response_headers"):
- # Include important handshake response headers
- handshake_headers = e["handshake_response_headers"]
- for header_name, header_value in handshake_headers.items():
- # Prefix with 'handshake_' to distinguish from request headers
- safe_header_name = header_name.lower().replace('-', '_')
- attrs[f"handshake_{safe_header_name}"] = str(header_value)
-
- # Add connection parameters if available
- if e.get("connection_params"):
- for key, value in e["connection_params"].items():
- if value is not None:
- attrs[f"connection_{key}"] = str(value)
-
- # Add request_id if present for server-side correlation
- request_id = e.get("request_id")
- if request_id:
- attrs["request_id"] = str(request_id)
-
- # Include ALL extras in the attributes for comprehensive telemetry
- extras = e.get("extras", {})
- if extras:
- for key, value in extras.items():
- if value is not None and key not in attrs:
- attrs[str(key)] = str(value)
-
- rec = _encode_error_event(
- err_type=str(e.get("error_type", e.get("error", "Error"))),
- message=str(e.get("error_message", e.get("message", ""))),
- severity=3,
- handled=bool(e.get("handled", True)),
- ts=ts,
- attributes=attrs,
- stack_trace=str(e.get("stack_trace", "")) or None,
- )
- out.append(_encode_record(rec, 2))
- elif etype == "uncaught_error":
- rec = _encode_error_event(
- err_type=str(e.get("error", "Error")),
- message=str(e.get("message", "")),
- severity=4 if not bool(e.get("handled", False)) else 3,
- handled=bool(e.get("handled", False)),
- ts=ts,
- attributes=None,
- stack_trace=str(e.get("stack_trace", "")) or None,
- )
- out.append(_encode_record(rec, 2))
- elif etype == "ws_close":
- attrs = {
- # Note: URL is never logged for privacy
- "connection_type": "websocket",
- }
- # Add request_id if present for server-side correlation
- request_id = e.get("request_id")
- if request_id:
- attrs["request_id"] = str(request_id)
- rec = _encode_telemetry_event("ws.close", ts, attrs, None)
- out.append(_encode_record(rec, 1))
- elif etype == "telemetry_event":
- # Generic telemetry event with custom name
- name = e.get("name", "unknown")
- attrs = dict(e.get("attributes", {}))
- metrics = e.get("metrics", {})
- # Convert metrics to float values
- if metrics:
- metrics = {k: float(v) for k, v in metrics.items()}
- rec = _encode_telemetry_event(name, ts, attrs, metrics)
- out.append(_encode_record(rec, 1))
- elif etype == "error_event":
- # Generic error event
- attrs = dict(e.get("attributes", {}))
- rec = _encode_error_event(
- err_type=str(e.get("error_type", "Error")),
- message=str(e.get("message", "")),
- severity=int(e.get("severity", 3)),
- handled=bool(e.get("handled", True)),
- ts=ts,
- attributes=attrs,
- stack_trace=str(e.get("stack_trace", "")) or None,
- file=str(e.get("file", "")) or None,
- line=int(e.get("line", 0)) if e.get("line") else None,
- column=int(e.get("column", 0)) if e.get("column") else None,
- )
- out.append(_encode_record(rec, 2))
- else:
- # Unknown event: drop silently
- continue
- return out
-
-
-def encode_telemetry_batch(events: List[dict], context: typing.Mapping[str, typing.Any]) -> bytes:
- ctx = _encode_telemetry_context(context)
- records = b"".join(_normalize_events(events))
- batch = _len_delimited(1, ctx) + records
- return batch
-
-
-def encode_telemetry_batch_iter(events: List[dict], context: typing.Mapping[str, typing.Any]) -> typing.Iterator[bytes]:
- # Streaming variant: yield small chunks (context first, then each record)
- yield _len_delimited(1, _encode_telemetry_context(context))
- for rec in _normalize_events(events):
- yield rec
-
-
diff --git a/src/deepgram/extensions/types/__init__.py b/src/deepgram/extensions/types/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/deepgram/extensions/types/sockets/__init__.py b/src/deepgram/extensions/types/sockets/__init__.py
deleted file mode 100644
index 2bf7ab2d..00000000
--- a/src/deepgram/extensions/types/sockets/__init__.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# Socket message types - protected from auto-generation
-
-# isort: skip_file
-
-import typing
-from importlib import import_module
-
-if typing.TYPE_CHECKING:
- # Speak socket types
- from .speak_v1_text_message import SpeakV1TextMessage
- from .speak_v1_control_message import SpeakV1ControlMessage
- from .speak_v1_audio_chunk_event import SpeakV1AudioChunkEvent
- from .speak_v1_metadata_event import SpeakV1MetadataEvent
- from .speak_v1_control_event import SpeakV1ControlEvent
- from .speak_v1_warning_event import SpeakV1WarningEvent
-
- # Listen socket types
- from .listen_v1_media_message import ListenV1MediaMessage
- from .listen_v1_control_message import ListenV1ControlMessage
- from .listen_v1_results_event import ListenV1ResultsEvent
- from .listen_v1_metadata_event import ListenV1MetadataEvent
- from .listen_v1_utterance_end_event import ListenV1UtteranceEndEvent
- from .listen_v1_speech_started_event import ListenV1SpeechStartedEvent
-
- # Listen V2 socket types
- from .listen_v2_media_message import ListenV2MediaMessage
- from .listen_v2_control_message import ListenV2ControlMessage
- from .listen_v2_connected_event import ListenV2ConnectedEvent
- from .listen_v2_turn_info_event import ListenV2TurnInfoEvent
- from .listen_v2_fatal_error_event import ListenV2FatalErrorEvent
-
- # Agent socket types - Main message types
- from .agent_v1_settings_message import AgentV1SettingsMessage
- from .agent_v1_update_speak_message import AgentV1UpdateSpeakMessage
- from .agent_v1_update_prompt_message import AgentV1UpdatePromptMessage
- from .agent_v1_inject_user_message_message import AgentV1InjectUserMessageMessage
- from .agent_v1_inject_agent_message_message import AgentV1InjectAgentMessageMessage
- from .agent_v1_function_call_response_message import AgentV1FunctionCallResponseMessage
- from .agent_v1_control_message import AgentV1ControlMessage
- from .agent_v1_media_message import AgentV1MediaMessage
- from .agent_v1_welcome_message import AgentV1WelcomeMessage
- from .agent_v1_settings_applied_event import AgentV1SettingsAppliedEvent
- from .agent_v1_conversation_text_event import AgentV1ConversationTextEvent
- from .agent_v1_user_started_speaking_event import AgentV1UserStartedSpeakingEvent
- from .agent_v1_agent_thinking_event import AgentV1AgentThinkingEvent
- from .agent_v1_function_call_request_event import AgentV1FunctionCallRequestEvent
- from .agent_v1_agent_started_speaking_event import AgentV1AgentStartedSpeakingEvent
- from .agent_v1_agent_audio_done_event import AgentV1AgentAudioDoneEvent
- from .agent_v1_prompt_updated_event import AgentV1PromptUpdatedEvent
- from .agent_v1_speak_updated_event import AgentV1SpeakUpdatedEvent
- from .agent_v1_injection_refused_event import AgentV1InjectionRefusedEvent
- from .agent_v1_error_event import AgentV1ErrorEvent
- from .agent_v1_warning_event import AgentV1WarningEvent
- from .agent_v1_audio_chunk_event import AgentV1AudioChunkEvent
-
- # Agent socket types - Nested configuration types
- from .agent_v1_settings_message import (
- AgentV1AudioInput,
- AgentV1AudioOutput,
- AgentV1AudioConfig,
- AgentV1HistoryMessage,
- AgentV1FunctionCall,
- AgentV1HistoryFunctionCalls,
- AgentV1Flags,
- AgentV1Context,
- AgentV1ListenProvider,
- AgentV1Listen,
- AgentV1Endpoint,
- AgentV1AwsCredentials,
- AgentV1Function,
- AgentV1OpenAiThinkProvider,
- AgentV1AwsBedrockThinkProvider,
- AgentV1AnthropicThinkProvider,
- AgentV1GoogleThinkProvider,
- AgentV1GroqThinkProvider,
- AgentV1Think,
- AgentV1DeepgramSpeakProvider,
- AgentV1ElevenLabsSpeakProvider,
- AgentV1CartesiaVoice,
- AgentV1CartesiaSpeakProvider,
- AgentV1OpenAiSpeakProvider,
- AgentV1AwsPollySpeakProvider,
- AgentV1SpeakProviderConfig,
- AgentV1Agent,
- )
-
- # Union types for socket clients
- from .socket_client_responses import (
- SpeakV1SocketClientResponse,
- ListenV1SocketClientResponse,
- ListenV2SocketClientResponse,
- AgentV1SocketClientResponse,
- # Backward compatibility aliases
- SpeakSocketClientResponse,
- ListenSocketClientResponse,
- AgentSocketClientResponse,
- )
-
-__all__ = [
- # Speak socket types
- "SpeakV1TextMessage",
- "SpeakV1ControlMessage",
- "SpeakV1AudioChunkEvent",
- "SpeakV1MetadataEvent",
- "SpeakV1ControlEvent",
- "SpeakV1WarningEvent",
-
- # Listen socket types
- "ListenV1MediaMessage",
- "ListenV1ControlMessage",
- "ListenV1ResultsEvent",
- "ListenV1MetadataEvent",
- "ListenV1UtteranceEndEvent",
- "ListenV1SpeechStartedEvent",
-
- # Listen V2 socket types
- "ListenV2MediaMessage",
- "ListenV2ControlMessage",
- "ListenV2ConnectedEvent",
- "ListenV2TurnInfoEvent",
- "ListenV2FatalErrorEvent",
-
- # Agent socket types - Main message types
- "AgentV1SettingsMessage",
- "AgentV1UpdateSpeakMessage",
- "AgentV1UpdatePromptMessage",
- "AgentV1InjectUserMessageMessage",
- "AgentV1InjectAgentMessageMessage",
- "AgentV1FunctionCallResponseMessage",
- "AgentV1ControlMessage",
- "AgentV1MediaMessage",
- "AgentV1WelcomeMessage",
- "AgentV1SettingsAppliedEvent",
- "AgentV1ConversationTextEvent",
- "AgentV1UserStartedSpeakingEvent",
- "AgentV1AgentThinkingEvent",
- "AgentV1FunctionCallRequestEvent",
- "AgentV1AgentStartedSpeakingEvent",
- "AgentV1AgentAudioDoneEvent",
- "AgentV1PromptUpdatedEvent",
- "AgentV1SpeakUpdatedEvent",
- "AgentV1InjectionRefusedEvent",
- "AgentV1ErrorEvent",
- "AgentV1WarningEvent",
- "AgentV1AudioChunkEvent",
-
- # Agent socket types - Nested configuration types
- "AgentV1AudioInput",
- "AgentV1AudioOutput",
- "AgentV1AudioConfig",
- "AgentV1HistoryMessage",
- "AgentV1FunctionCall",
- "AgentV1HistoryFunctionCalls",
- "AgentV1Flags",
- "AgentV1Context",
- "AgentV1ListenProvider",
- "AgentV1Listen",
- "AgentV1Endpoint",
- "AgentV1AwsCredentials",
- "AgentV1Function",
- "AgentV1OpenAiThinkProvider",
- "AgentV1AwsBedrockThinkProvider",
- "AgentV1AnthropicThinkProvider",
- "AgentV1GoogleThinkProvider",
- "AgentV1GroqThinkProvider",
- "AgentV1Think",
- "AgentV1DeepgramSpeakProvider",
- "AgentV1ElevenLabsSpeakProvider",
- "AgentV1CartesiaVoice",
- "AgentV1CartesiaSpeakProvider",
- "AgentV1OpenAiSpeakProvider",
- "AgentV1AwsPollySpeakProvider",
- "AgentV1SpeakProviderConfig",
- "AgentV1Agent",
-
- # Union types
- "SpeakV1SocketClientResponse",
- "ListenV1SocketClientResponse",
- "ListenV2SocketClientResponse",
- "AgentV1SocketClientResponse",
-
- # Backward compatibility aliases
- "SpeakSocketClientResponse",
- "ListenSocketClientResponse",
- "AgentSocketClientResponse",
-]
-
-
-def __getattr__(name: str) -> typing.Any:
- if name in __all__:
- # Handle special case for union types
- if name.endswith("SocketClientResponse"):
- return getattr(import_module(".socket_client_responses", package=__name__), name)
-
- # Handle nested types from agent_v1_settings_message
- nested_agent_types = {
- "AgentV1AudioInput", "AgentV1AudioOutput", "AgentV1AudioConfig",
- "AgentV1HistoryMessage", "AgentV1FunctionCall", "AgentV1HistoryFunctionCalls",
- "AgentV1Flags", "AgentV1Context", "AgentV1ListenProvider", "AgentV1Listen",
- "AgentV1Endpoint", "AgentV1AwsCredentials", "AgentV1Function",
- "AgentV1OpenAiThinkProvider", "AgentV1AwsBedrockThinkProvider",
- "AgentV1AnthropicThinkProvider", "AgentV1GoogleThinkProvider",
- "AgentV1GroqThinkProvider", "AgentV1Think", "AgentV1DeepgramSpeakProvider",
- "AgentV1ElevenLabsSpeakProvider", "AgentV1CartesiaVoice",
- "AgentV1CartesiaSpeakProvider", "AgentV1OpenAiSpeakProvider",
- "AgentV1AwsPollySpeakProvider", "AgentV1SpeakProviderConfig",
- "AgentV1Agent"
- }
-
- if name in nested_agent_types:
- return getattr(import_module(".agent_v1_settings_message", package=__name__), name)
-
- # Convert CamelCase to snake_case for other types
- import re
- module_name = re.sub('([A-Z]+)', r'_\1', name).lower().lstrip('_')
- return getattr(import_module(f".{module_name}", package=__name__), name)
- raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_agent_audio_done_event.py b/src/deepgram/extensions/types/sockets/agent_v1_agent_audio_done_event.py
deleted file mode 100644
index b1c4c280..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_agent_audio_done_event.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Agent V1 Agent Audio Done Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1AgentAudioDoneEvent(UniversalBaseModel):
- """
- Get signals that the server has finished sending the final audio segment to the client
- """
-
- type: typing.Literal["AgentAudioDone"] = "AgentAudioDone"
- """Message type identifier indicating the agent has finished sending audio"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_agent_started_speaking_event.py b/src/deepgram/extensions/types/sockets/agent_v1_agent_started_speaking_event.py
deleted file mode 100644
index 4484892c..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_agent_started_speaking_event.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Agent V1 Agent Started Speaking Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1AgentStartedSpeakingEvent(UniversalBaseModel):
- """
- Get notified when the server begins streaming an agent's audio response for playback.
- This message is only sent when the experimental flag is enabled
- """
-
- type: typing.Literal["AgentStartedSpeaking"] = "AgentStartedSpeaking"
- """Message type identifier for agent started speaking"""
-
- total_latency: float
- """Seconds from receiving the user's utterance to producing the agent's reply"""
-
- tts_latency: float
- """The portion of total latency attributable to text-to-speech"""
-
- ttt_latency: float
- """The portion of total latency attributable to text-to-text (usually an LLM)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_agent_thinking_event.py b/src/deepgram/extensions/types/sockets/agent_v1_agent_thinking_event.py
deleted file mode 100644
index ea02b83a..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_agent_thinking_event.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Agent V1 Agent Thinking Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1AgentThinkingEvent(UniversalBaseModel):
- """
- Inform the client when the agent is processing information
- """
-
- type: typing.Literal["AgentThinking"] = "AgentThinking"
- """Message type identifier for agent thinking"""
-
- content: str
- """The text of the agent's thought process"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_audio_chunk_event.py b/src/deepgram/extensions/types/sockets/agent_v1_audio_chunk_event.py
deleted file mode 100644
index ddc079e5..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_audio_chunk_event.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Agent V1 Audio Chunk Event - protected from auto-generation
-
-# This represents binary audio data received from the Voice Agent WebSocket
-# The actual data is bytes, but we define this as a type alias for clarity
-AgentV1AudioChunkEvent = bytes
-"""
-Raw binary audio data generated by Deepgram's Voice Agent API.
-Content-Type: application/octet-stream
-"""
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_conversation_text_event.py b/src/deepgram/extensions/types/sockets/agent_v1_conversation_text_event.py
deleted file mode 100644
index f3934985..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_conversation_text_event.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Agent V1 Conversation Text Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1ConversationTextEvent(UniversalBaseModel):
- """
- Facilitate real-time communication by relaying spoken statements from both the user and the assistant
- """
-
- type: typing.Literal["ConversationText"] = "ConversationText"
- """Message type identifier for conversation text"""
-
- role: typing.Literal["user", "assistant"]
- """Identifies who spoke the statement"""
-
- content: str
- """The actual statement that was spoken"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_function_call_request_event.py b/src/deepgram/extensions/types/sockets/agent_v1_function_call_request_event.py
deleted file mode 100644
index 0a719b0f..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_function_call_request_event.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Agent V1 Function Call Request Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1FunctionCallRequestFunction(UniversalBaseModel):
- """Function call request details"""
-
- id: str
- """Unique identifier for the function call"""
-
- name: str
- """The name of the function to call"""
-
- arguments: str
- """JSON string containing the function arguments"""
-
- client_side: bool
- """Whether the function should be executed client-side"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1FunctionCallRequestEvent(UniversalBaseModel):
- """
- Client-side or server-side function call request sent by the server
- """
-
- type: typing.Literal["FunctionCallRequest"] = "FunctionCallRequest"
- """Message type identifier for function call requests"""
-
- functions: typing.List[AgentV1FunctionCallRequestFunction]
- """Array of functions to be called"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_function_call_response_message.py b/src/deepgram/extensions/types/sockets/agent_v1_function_call_response_message.py
deleted file mode 100644
index 6dde0df2..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_function_call_response_message.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Agent V1 Function Call Response Message - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1FunctionCallResponseMessage(UniversalBaseModel):
- """
- Client-side or server-side function call response sent by the server
- """
-
- type: typing.Literal["FunctionCallResponse"] = "FunctionCallResponse"
- """Message type identifier for function call responses"""
-
- name: str
- """The name of the function being called"""
-
- content: str
- """The content or result of the function call"""
-
- id: typing.Optional[str] = None
- """The unique identifier for the function call (optional but recommended for traceability)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_inject_agent_message_message.py b/src/deepgram/extensions/types/sockets/agent_v1_inject_agent_message_message.py
deleted file mode 100644
index 1c00546d..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_inject_agent_message_message.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Agent V1 Inject Agent Message Message - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1InjectAgentMessageMessage(UniversalBaseModel):
- """
- Immediately trigger an agent response during a conversation
- """
-
- type: typing.Literal["InjectAgentMessage"] = "InjectAgentMessage"
- """Message type identifier for injecting an agent message"""
-
- message: str
- """The statement that the agent should say"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_inject_user_message_message.py b/src/deepgram/extensions/types/sockets/agent_v1_inject_user_message_message.py
deleted file mode 100644
index 0e7da495..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_inject_user_message_message.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Agent V1 Inject User Message Message - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1InjectUserMessageMessage(UniversalBaseModel):
- """
- Send a text based message to the agent
- """
-
- type: typing.Literal["InjectUserMessage"] = "InjectUserMessage"
- """Message type identifier for injecting a user message"""
-
- content: str
- """The specific phrase or statement the agent should respond to"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_injection_refused_event.py b/src/deepgram/extensions/types/sockets/agent_v1_injection_refused_event.py
deleted file mode 100644
index c0f93373..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_injection_refused_event.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Agent V1 Injection Refused Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1InjectionRefusedEvent(UniversalBaseModel):
- """
- Receive injection refused message
- """
-
- type: typing.Literal["InjectionRefused"] = "InjectionRefused"
- """Message type identifier for injection refused"""
-
- message: str
- """Details about why the injection was refused"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_media_message.py b/src/deepgram/extensions/types/sockets/agent_v1_media_message.py
deleted file mode 100644
index c3c9ffdb..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_media_message.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Agent V1 Media Message - protected from auto-generation
-
-# This represents binary media data sent to the Voice Agent WebSocket
-# The actual data is bytes, but we define this as a type alias for clarity
-AgentV1MediaMessage = bytes
-"""
-Raw binary audio data sent to Deepgram's Voice Agent API for processing.
-Content-Type: application/octet-stream
-"""
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_prompt_updated_event.py b/src/deepgram/extensions/types/sockets/agent_v1_prompt_updated_event.py
deleted file mode 100644
index 0d0db586..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_prompt_updated_event.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Agent V1 Prompt Updated Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1PromptUpdatedEvent(UniversalBaseModel):
- """
- Confirms that an UpdatePrompt message from the client has been applied
- """
-
- type: typing.Literal["PromptUpdated"] = "PromptUpdated"
- """Message type identifier for prompt update confirmation"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_settings_applied_event.py b/src/deepgram/extensions/types/sockets/agent_v1_settings_applied_event.py
deleted file mode 100644
index 6987e368..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_settings_applied_event.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Agent V1 Settings Applied Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1SettingsAppliedEvent(UniversalBaseModel):
- """
- Confirm the server has successfully received and applied the Settings message
- """
-
- type: typing.Literal["SettingsApplied"] = "SettingsApplied"
- """Message type identifier for settings applied confirmation"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_settings_message.py b/src/deepgram/extensions/types/sockets/agent_v1_settings_message.py
deleted file mode 100644
index ccb57e84..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_settings_message.py
+++ /dev/null
@@ -1,685 +0,0 @@
-# Agent V1 Settings Message - protected from auto-generation
-
-import typing
-
-try:
- from typing import Annotated # type: ignore[attr-defined,assignment]
-except ImportError:
- from typing_extensions import Annotated # type: ignore[import-untyped,assignment]
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-# Cross-version constrained types
-if IS_PYDANTIC_V2:
- IntContextLength = Annotated[int, pydantic.Field(ge=2)] # type: ignore[misc,assignment,attr-defined,valid-type]
- Temperature0to2 = Annotated[float, pydantic.Field(ge=0, le=2)] # type: ignore[misc,assignment,attr-defined,valid-type]
- Temperature0to1 = Annotated[float, pydantic.Field(ge=0, le=1)] # type: ignore[misc,assignment,attr-defined,valid-type]
-else:
- IntContextLength = pydantic.conint(ge=2) # type: ignore[attr-defined,misc,assignment,no-redef,valid-type]
- Temperature0to2 = pydantic.confloat(ge=0, le=2) # type: ignore[attr-defined,misc,assignment,no-redef,valid-type]
- Temperature0to1 = pydantic.confloat(ge=0, le=1) # type: ignore[attr-defined,misc,assignment,no-redef,valid-type]
-
-
-class AgentV1AudioInput(UniversalBaseModel):
- """Audio input configuration settings"""
-
- encoding: typing.Literal[
- "linear16", "linear32", "flac", "alaw", "mulaw",
- "amr-nb", "amr-wb", "opus", "ogg-opus", "speex", "g729"
- ] = "linear16"
- """Audio encoding format"""
-
- sample_rate: int = 24000
- """Sample rate in Hz. Common values are 16000, 24000, 44100, 48000"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1AudioOutput(UniversalBaseModel):
- """Audio output configuration settings"""
-
- encoding: typing.Optional[typing.Literal["linear16", "mulaw", "alaw"]] = "linear16"
- """Audio encoding format for streaming TTS output"""
-
- sample_rate: typing.Optional[int] = None
- """Sample rate in Hz"""
-
- bitrate: typing.Optional[int] = None
- """Audio bitrate in bits per second"""
-
- container: typing.Optional[str] = None
- """Audio container format. If omitted, defaults to 'none'"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1AudioConfig(UniversalBaseModel):
- """Audio configuration settings"""
-
- input: typing.Optional[AgentV1AudioInput] = None
- """Audio input configuration"""
-
- output: typing.Optional[AgentV1AudioOutput] = None
- """Audio output configuration"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1HistoryMessage(UniversalBaseModel):
- """Conversation text as part of the conversation history"""
-
- type: typing.Literal["History"] = "History"
- """Message type identifier for conversation text"""
-
- role: typing.Literal["user", "assistant"]
- """Identifies who spoke the statement"""
-
- content: str
- """The actual statement that was spoken"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1FunctionCall(UniversalBaseModel):
- """Function call in conversation history"""
-
- id: str
- """Unique identifier for the function call"""
-
- name: str
- """Name of the function called"""
-
- client_side: bool
- """Indicates if the call was client-side or server-side"""
-
- arguments: str
- """Arguments passed to the function"""
-
- response: str
- """Response from the function call"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1HistoryFunctionCalls(UniversalBaseModel):
- """Client-side or server-side function call request and response as part of the conversation history"""
-
- type: typing.Literal["History"] = "History"
- """Message type identifier"""
-
- function_calls: typing.List[AgentV1FunctionCall]
- """List of function call objects"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1Flags(UniversalBaseModel):
- """Agent flags configuration"""
-
- history: bool = True
- """Enable or disable history message reporting"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Context configuration
-class AgentV1Context(UniversalBaseModel):
- """Conversation context including the history of messages and function calls"""
-
- messages: typing.Optional[typing.List[typing.Union[AgentV1HistoryMessage, AgentV1HistoryFunctionCalls]]] = None
- """Conversation history as a list of messages and function calls"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Listen provider configuration
-class AgentV1ListenProvider(UniversalBaseModel):
- """Listen provider configuration"""
-
- type: typing.Literal["deepgram"] = "deepgram"
- """Provider type for speech-to-text"""
-
- model: str
- """Model to use for speech to text"""
-
- keyterms: typing.Optional[typing.List[str]] = None
- """Prompt key-term recognition (nova-3 'en' only)"""
-
- smart_format: typing.Optional[bool] = False
- """Applies smart formatting to improve transcript readability (Deepgram providers only)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1Listen(UniversalBaseModel):
- """Listen configuration"""
-
- provider: AgentV1ListenProvider
- """Listen provider configuration"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Endpoint configuration
-class AgentV1Endpoint(UniversalBaseModel):
- """Custom endpoint configuration"""
-
- url: str
- """Custom endpoint URL"""
-
- headers: typing.Optional[typing.Dict[str, str]] = None
- """Custom headers for the endpoint"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# AWS Credentials
-class AgentV1AwsCredentials(UniversalBaseModel):
- """AWS credentials configuration"""
-
- type: typing.Literal["sts", "iam"]
- """AWS credentials type (STS short-lived or IAM long-lived)"""
-
- region: str
- """AWS region"""
-
- access_key_id: str
- """AWS access key"""
-
- secret_access_key: str
- """AWS secret access key"""
-
- session_token: typing.Optional[str] = None
- """AWS session token (required for STS only)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Function definition
-class AgentV1Function(UniversalBaseModel):
- """Function definition for think provider"""
-
- name: str
- """Function name"""
-
- description: typing.Optional[str] = None
- """Function description"""
-
- parameters: typing.Optional[typing.Dict[str, typing.Any]] = None
- """Function parameters"""
-
- endpoint: typing.Optional[AgentV1Endpoint] = None
- """The Function endpoint to call. if not passed, function is called client-side"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Think provider configurations
-class AgentV1OpenAiThinkProvider(UniversalBaseModel):
- """OpenAI think provider configuration"""
-
- type: typing.Literal["open_ai"] = "open_ai"
- """Provider type"""
-
- model: typing.Literal[
- "gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "gpt-4o", "gpt-4o-mini"
- ]
- """OpenAI model to use"""
-
- temperature: typing.Optional[Temperature0to2] = None # type: ignore[valid-type]
- """OpenAI temperature (0-2)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1AwsBedrockThinkProvider(UniversalBaseModel):
- """AWS Bedrock think provider configuration"""
-
- type: typing.Literal["aws_bedrock"] = "aws_bedrock"
- """Provider type"""
-
- model: typing.Literal[
- "anthropic/claude-3-5-sonnet-20240620-v1:0",
- "anthropic/claude-3-5-haiku-20240307-v1:0"
- ]
- """AWS Bedrock model to use"""
-
- temperature: typing.Optional[Temperature0to2] = None # type: ignore[valid-type]
- """AWS Bedrock temperature (0-2)"""
-
- credentials: typing.Optional[AgentV1AwsCredentials] = None
- """AWS credentials type (STS short-lived or IAM long-lived)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1AnthropicThinkProvider(UniversalBaseModel):
- """Anthropic think provider configuration"""
-
- type: typing.Literal["anthropic"] = "anthropic"
- """Provider type"""
-
- model: typing.Literal["claude-3-5-haiku-latest", "claude-sonnet-4-20250514"]
- """Anthropic model to use"""
-
- temperature: typing.Optional[Temperature0to1] = None # type: ignore[valid-type]
- """Anthropic temperature (0-1)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1GoogleThinkProvider(UniversalBaseModel):
- """Google think provider configuration"""
-
- type: typing.Literal["google"] = "google"
- """Provider type"""
-
- model: typing.Literal["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemini-2.5-flash"]
- """Google model to use"""
-
- temperature: typing.Optional[Temperature0to2] = None # type: ignore[valid-type]
- """Google temperature (0-2)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1GroqThinkProvider(UniversalBaseModel):
- """Groq think provider configuration"""
-
- type: typing.Literal["groq"] = "groq"
- """Provider type"""
-
- model: typing.Literal["openai/gpt-oss-20b"]
- """Groq model to use"""
-
- temperature: typing.Optional[Temperature0to2] = None # type: ignore[valid-type]
- """Groq temperature (0-2)"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Think configuration
-class AgentV1Think(UniversalBaseModel):
- """Think configuration"""
-
- provider: typing.Union[
- AgentV1OpenAiThinkProvider, AgentV1AwsBedrockThinkProvider,
- AgentV1AnthropicThinkProvider, AgentV1GoogleThinkProvider,
- AgentV1GroqThinkProvider
- ]
- """Think provider configuration"""
-
- endpoint: typing.Optional[AgentV1Endpoint] = None
- """Optional for non-Deepgram LLM providers. When present, must include url field and headers object"""
-
- functions: typing.Optional[typing.List[AgentV1Function]] = None
- """Function definitions"""
-
- prompt: typing.Optional[str] = None
- """System prompt"""
-
- context_length: typing.Optional[typing.Union[typing.Literal["max"], IntContextLength]] = None # type: ignore[valid-type]
- """Specifies the number of characters retained in context between user messages, agent responses, and function calls"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Speak provider configurations
-class AgentV1DeepgramSpeakProvider(UniversalBaseModel):
- """Deepgram speak provider configuration"""
-
- type: typing.Literal["deepgram"] = "deepgram"
- """Provider type"""
-
- model: typing.Literal[
- # Aura-1 English Voices
- "aura-asteria-en", "aura-luna-en", "aura-stella-en", "aura-athena-en",
- "aura-hera-en", "aura-orion-en", "aura-arcas-en", "aura-perseus-en",
- "aura-angus-en", "aura-orpheus-en", "aura-helios-en", "aura-zeus-en",
- # Aura-2 English Voices
- "aura-2-amalthea-en", "aura-2-andromeda-en", "aura-2-apollo-en",
- "aura-2-arcas-en", "aura-2-aries-en", "aura-2-asteria-en",
- "aura-2-athena-en", "aura-2-atlas-en", "aura-2-aurora-en",
- "aura-2-callista-en", "aura-2-cora-en", "aura-2-cordelia-en",
- "aura-2-delia-en", "aura-2-draco-en", "aura-2-electra-en",
- "aura-2-harmonia-en", "aura-2-helena-en", "aura-2-hera-en",
- "aura-2-hermes-en", "aura-2-hyperion-en", "aura-2-iris-en",
- "aura-2-janus-en", "aura-2-juno-en", "aura-2-jupiter-en",
- "aura-2-luna-en", "aura-2-mars-en", "aura-2-minerva-en",
- "aura-2-neptune-en", "aura-2-odysseus-en", "aura-2-ophelia-en",
- "aura-2-orion-en", "aura-2-orpheus-en", "aura-2-pandora-en",
- "aura-2-phoebe-en", "aura-2-pluto-en", "aura-2-saturn-en",
- "aura-2-selene-en", "aura-2-thalia-en", "aura-2-theia-en",
- "aura-2-vesta-en", "aura-2-zeus-en",
- # Aura-2 Spanish Voices
- "aura-2-sirio-es", "aura-2-nestor-es", "aura-2-carina-es",
- "aura-2-celeste-es", "aura-2-alvaro-es", "aura-2-diana-es",
- "aura-2-aquila-es", "aura-2-selena-es", "aura-2-estrella-es",
- "aura-2-javier-es"
- ]
- """Deepgram TTS model"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1ElevenLabsSpeakProvider(UniversalBaseModel):
- """Eleven Labs speak provider configuration"""
-
- type: typing.Literal["eleven_labs"] = "eleven_labs"
- """Provider type"""
-
- model_id: typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"]
- """Eleven Labs model ID"""
-
- language_code: typing.Optional[str] = None
- """Eleven Labs optional language code"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1CartesiaVoice(UniversalBaseModel):
- """Cartesia voice configuration"""
-
- mode: str
- """Cartesia voice mode"""
-
- id: str
- """Cartesia voice ID"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1CartesiaSpeakProvider(UniversalBaseModel):
- """Cartesia speak provider configuration"""
-
- type: typing.Literal["cartesia"] = "cartesia"
- """Provider type"""
-
- model_id: typing.Literal["sonic-2", "sonic-multilingual"]
- """Cartesia model ID"""
-
- voice: AgentV1CartesiaVoice
- """Cartesia voice configuration"""
-
- language: typing.Optional[str] = None
- """Cartesia language code"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1OpenAiSpeakProvider(UniversalBaseModel):
- """OpenAI speak provider configuration"""
-
- type: typing.Literal["open_ai"] = "open_ai"
- """Provider type"""
-
- model: typing.Literal["tts-1", "tts-1-hd"]
- """OpenAI TTS model"""
-
- voice: typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
- """OpenAI voice"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1AwsPollySpeakProvider(UniversalBaseModel):
- """AWS Polly speak provider configuration"""
-
- type: typing.Literal["aws_polly"] = "aws_polly"
- """Provider type"""
-
- voice: typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"]
- """AWS Polly voice name"""
-
- language_code: str
- """Language code (e.g., "en-US")"""
-
- engine: typing.Literal["generative", "long-form", "standard", "neural"]
- """AWS Polly engine"""
-
- credentials: AgentV1AwsCredentials
- """AWS credentials"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-# Speak configuration
-class AgentV1SpeakProviderConfig(UniversalBaseModel):
- """Speak provider configuration wrapper"""
-
- provider: typing.Union[
- AgentV1DeepgramSpeakProvider, AgentV1ElevenLabsSpeakProvider,
- AgentV1CartesiaSpeakProvider, AgentV1OpenAiSpeakProvider,
- AgentV1AwsPollySpeakProvider
- ]
- """Speak provider configuration"""
-
- endpoint: typing.Optional[AgentV1Endpoint] = None
- """Optional if provider is Deepgram. Required for non-Deepgram TTS providers"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-
-# Agent configuration
-class AgentV1Agent(UniversalBaseModel):
- """Agent configuration"""
-
- language: typing.Optional[typing.Literal["en", "es"]] = "en"
- """Agent language"""
-
- context: typing.Optional[AgentV1Context] = None
- """Conversation context including the history of messages and function calls"""
-
- listen: typing.Optional[AgentV1Listen] = None
- """Listen configuration"""
-
- think: AgentV1Think
- """Think configuration"""
-
- speak: typing.Union[AgentV1SpeakProviderConfig, typing.List[AgentV1SpeakProviderConfig]]
- """Speak configuration - can be single provider or array of providers"""
-
- greeting: typing.Optional[str] = None
- """Optional message that agent will speak at the start"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class AgentV1SettingsMessage(UniversalBaseModel):
- """
- Configure the voice agent and sets the input and output audio formats
- """
-
- type: typing.Literal["Settings"] = "Settings"
- """Message type identifier"""
-
- audio: AgentV1AudioConfig
- """Audio configuration settings"""
-
- agent: AgentV1Agent
- """Agent configuration with proper nested types"""
-
- tags: typing.Optional[typing.List[str]] = None
- """Tags to associate with the request"""
-
- experimental: typing.Optional[bool] = False
- """To enable experimental features"""
-
- flags: typing.Optional[AgentV1Flags] = None
- """Agent flags configuration"""
-
- mip_opt_out: typing.Optional[bool] = False
- """To opt out of Deepgram Model Improvement Program"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_speak_updated_event.py b/src/deepgram/extensions/types/sockets/agent_v1_speak_updated_event.py
deleted file mode 100644
index bd518819..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_speak_updated_event.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Agent V1 Speak Updated Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1SpeakUpdatedEvent(UniversalBaseModel):
- """
- Confirms that an UpdateSpeak message from the client has been applied
- """
-
- type: typing.Literal["SpeakUpdated"] = "SpeakUpdated"
- """Message type identifier for speak update confirmation"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_update_prompt_message.py b/src/deepgram/extensions/types/sockets/agent_v1_update_prompt_message.py
deleted file mode 100644
index 5cd34061..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_update_prompt_message.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Agent V1 Update Prompt Message - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1UpdatePromptMessage(UniversalBaseModel):
- """
- Send a message to update the system prompt of the agent
- """
-
- type: typing.Literal["UpdatePrompt"] = "UpdatePrompt"
- """Message type identifier for prompt update request"""
-
- prompt: str
- """The new system prompt to be used by the agent"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_update_speak_message.py b/src/deepgram/extensions/types/sockets/agent_v1_update_speak_message.py
deleted file mode 100644
index cb391739..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_update_speak_message.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Agent V1 Update Speak Message - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-# Import the complete speak provider types from settings message
-from .agent_v1_settings_message import (
- AgentV1SpeakProviderConfig,
-)
-
-
-class AgentV1UpdateSpeakMessage(UniversalBaseModel):
- """
- Send a message to change the Speak model in the middle of a conversation
- """
-
- type: typing.Literal["UpdateSpeak"] = "UpdateSpeak"
- """Message type identifier for updating the speak model"""
-
- speak: AgentV1SpeakProviderConfig
- """Configuration for the speak model with proper nested types"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_user_started_speaking_event.py b/src/deepgram/extensions/types/sockets/agent_v1_user_started_speaking_event.py
deleted file mode 100644
index 786e6c50..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_user_started_speaking_event.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Agent V1 User Started Speaking Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1UserStartedSpeakingEvent(UniversalBaseModel):
- """
- Notify the client that the user has begun speaking
- """
-
- type: typing.Literal["UserStartedSpeaking"] = "UserStartedSpeaking"
- """Message type identifier indicating that the user has begun speaking"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_warning_event.py b/src/deepgram/extensions/types/sockets/agent_v1_warning_event.py
deleted file mode 100644
index 296ec399..00000000
--- a/src/deepgram/extensions/types/sockets/agent_v1_warning_event.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Agent V1 Warning Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class AgentV1WarningEvent(UniversalBaseModel):
- """
- Notifies the client of non-fatal errors or warnings
- """
-
- type: typing.Literal["Warning"] = "Warning"
- """Message type identifier for warnings"""
-
- description: str
- """Description of the warning"""
-
- code: str
- """Warning code identifier"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
\ No newline at end of file
diff --git a/src/deepgram/extensions/types/sockets/listen_v1_media_message.py b/src/deepgram/extensions/types/sockets/listen_v1_media_message.py
deleted file mode 100644
index 3719cbc4..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v1_media_message.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Listen V1 Media Message - protected from auto-generation
-
-# This represents binary media data sent to the WebSocket
-# The actual data is bytes, but we define this as a type alias for clarity
-ListenV1MediaMessage = bytes
-"""
-Audio data transmitted as raw binary WebSocket messages.
-Content-Type: application/octet-stream
-"""
diff --git a/src/deepgram/extensions/types/sockets/listen_v1_metadata_event.py b/src/deepgram/extensions/types/sockets/listen_v1_metadata_event.py
deleted file mode 100644
index d55c46df..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v1_metadata_event.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Listen V1 Metadata Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class ListenV1MetadataEvent(UniversalBaseModel):
- """
- Metadata event - these are usually information describing the connection
- """
-
- type: typing.Literal["Metadata"]
- """Message type identifier"""
-
- transaction_key: typing.Optional[str] = None
- """The transaction key (deprecated)"""
-
- request_id: str
- """The request ID"""
-
- sha256: str
- """The sha256"""
-
- created: str
- """The created timestamp"""
-
- duration: float
- """The duration"""
-
- channels: float
- """The channels"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/listen_v1_results_event.py b/src/deepgram/extensions/types/sockets/listen_v1_results_event.py
deleted file mode 100644
index ee879279..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v1_results_event.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Listen V1 Results Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class ListenV1Word(UniversalBaseModel):
- """Word in transcription results"""
- word: str
- """The word of the transcription"""
-
- start: float
- """The start time of the word"""
-
- end: float
- """The end time of the word"""
-
- confidence: float
- """The confidence of the word"""
-
- language: typing.Optional[str] = None
- """The language of the word"""
-
- punctuated_word: typing.Optional[str] = None
- """The punctuated word of the word"""
-
- speaker: typing.Optional[int] = None
- """The speaker of the word"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ListenV1Alternative(UniversalBaseModel):
- """Alternative transcription result"""
- transcript: str
- """The transcript of the transcription"""
-
- confidence: float
- """The confidence of the transcription"""
-
- languages: typing.Optional[typing.List[str]] = None
- """The languages of the transcription"""
-
- words: typing.List[ListenV1Word]
- """Array of words in the transcription"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ListenV1Channel(UniversalBaseModel):
- """Channel transcription results"""
- alternatives: typing.List[ListenV1Alternative]
- """Array of alternative transcription results"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ListenV1ModelInfo(UniversalBaseModel):
- """Model information"""
- name: str
- """The name of the model"""
-
- version: str
- """The version of the model"""
-
- arch: str
- """The arch of the model"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ListenV1ResultsMetadata(UniversalBaseModel):
- """Results metadata"""
- request_id: str
- """The request ID"""
-
- model_info: ListenV1ModelInfo
- """Model information"""
-
- model_uuid: str
- """The model UUID"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ListenV1ResultsEvent(UniversalBaseModel):
- """
- Deepgram has responded with a transcription
- """
-
- type: typing.Literal["Results"]
- """Message type identifier"""
-
- channel_index: typing.List[int]
- """The index of the channel"""
-
- duration: float
- """The duration of the transcription"""
-
- start: float
- """The start time of the transcription"""
-
- is_final: typing.Optional[bool] = None
- """Whether the transcription is final"""
-
- speech_final: typing.Optional[bool] = None
- """Whether the transcription is speech final"""
-
- channel: ListenV1Channel
- """Channel transcription results"""
-
- metadata: ListenV1ResultsMetadata
- """Results metadata"""
-
- from_finalize: typing.Optional[bool] = None
- """Whether the transcription is from a finalize message"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/listen_v2_connected_event.py b/src/deepgram/extensions/types/sockets/listen_v2_connected_event.py
deleted file mode 100644
index 39e97087..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v2_connected_event.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from typing import Optional
-
-from ....core.pydantic_utilities import UniversalBaseModel
-
-
-class ListenV2ConnectedEvent(UniversalBaseModel):
- type: str
- request_id: str
- sequence_id: int
-
- def json(self, **kwargs) -> str:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs) -> dict:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- extra = "forbid"
diff --git a/src/deepgram/extensions/types/sockets/listen_v2_control_message.py b/src/deepgram/extensions/types/sockets/listen_v2_control_message.py
deleted file mode 100644
index cc69837d..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v2_control_message.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from typing import Literal
-
-from ....core.pydantic_utilities import UniversalBaseModel
-
-
-class ListenV2ControlMessage(UniversalBaseModel):
- """Control messages for managing the Speech to Text WebSocket connection"""
-
- type: Literal["CloseStream"]
-
- def json(self, **kwargs) -> str:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs) -> dict:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- extra = "forbid"
diff --git a/src/deepgram/extensions/types/sockets/listen_v2_fatal_error_event.py b/src/deepgram/extensions/types/sockets/listen_v2_fatal_error_event.py
deleted file mode 100644
index eb7107ab..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v2_fatal_error_event.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from typing import Optional
-
-from ....core.pydantic_utilities import UniversalBaseModel
-
-
-class ListenV2FatalErrorEvent(UniversalBaseModel):
- type: str
- sequence_id: int
- code: str
- description: str
-
- def json(self, **kwargs) -> str:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs) -> dict:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- extra = "forbid"
diff --git a/src/deepgram/extensions/types/sockets/listen_v2_media_message.py b/src/deepgram/extensions/types/sockets/listen_v2_media_message.py
deleted file mode 100644
index 79a7f054..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v2_media_message.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ....core.pydantic_utilities import UniversalBaseModel
-
-
-class ListenV2MediaMessage(UniversalBaseModel):
- """Audio data is transmitted as raw binary WebSocket messages"""
-
- def json(self, **kwargs) -> str:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs) -> dict:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- extra = "forbid"
diff --git a/src/deepgram/extensions/types/sockets/listen_v2_turn_info_event.py b/src/deepgram/extensions/types/sockets/listen_v2_turn_info_event.py
deleted file mode 100644
index ab099dd9..00000000
--- a/src/deepgram/extensions/types/sockets/listen_v2_turn_info_event.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from typing import List, Optional
-
-from ....core.pydantic_utilities import UniversalBaseModel
-
-
-class ListenV2TurnInfoEventWordsItem(UniversalBaseModel):
- word: str
- confidence: float
-
- def json(self, **kwargs) -> str:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs) -> dict:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- extra = "forbid"
-
-
-class ListenV2TurnInfoEvent(UniversalBaseModel):
- type: str
- request_id: str
- sequence_id: int
- event: str
- turn_index: int
- audio_window_start: float
- audio_window_end: float
- transcript: str
- words: List[ListenV2TurnInfoEventWordsItem]
- end_of_turn_confidence: float
-
- def json(self, **kwargs) -> str:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().json(**kwargs_with_defaults)
-
- def dict(self, **kwargs) -> dict:
- kwargs_with_defaults = {"by_alias": True, "exclude_unset": True, **kwargs}
- return super().dict(**kwargs_with_defaults)
-
- class Config:
- frozen = True
- extra = "forbid"
diff --git a/src/deepgram/extensions/types/sockets/socket_client_responses.py b/src/deepgram/extensions/types/sockets/socket_client_responses.py
deleted file mode 100644
index 7e8f1f48..00000000
--- a/src/deepgram/extensions/types/sockets/socket_client_responses.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Socket client response union types - protected from auto-generation
-
-import typing
-
-# Import all event types for union definitions
-if typing.TYPE_CHECKING:
- from .agent_v1_agent_audio_done_event import AgentV1AgentAudioDoneEvent
- from .agent_v1_agent_started_speaking_event import AgentV1AgentStartedSpeakingEvent
- from .agent_v1_agent_thinking_event import AgentV1AgentThinkingEvent
- from .agent_v1_audio_chunk_event import AgentV1AudioChunkEvent
- from .agent_v1_conversation_text_event import AgentV1ConversationTextEvent
- from .agent_v1_error_event import AgentV1ErrorEvent
- from .agent_v1_function_call_request_event import AgentV1FunctionCallRequestEvent
- from .agent_v1_function_call_response_message import AgentV1FunctionCallResponseMessage
- from .agent_v1_injection_refused_event import AgentV1InjectionRefusedEvent
- from .agent_v1_prompt_updated_event import AgentV1PromptUpdatedEvent
- from .agent_v1_settings_applied_event import AgentV1SettingsAppliedEvent
-
- # History messages may also be emitted by the server
- from .agent_v1_settings_message import AgentV1HistoryFunctionCalls, AgentV1HistoryMessage
- from .agent_v1_speak_updated_event import AgentV1SpeakUpdatedEvent
- from .agent_v1_user_started_speaking_event import AgentV1UserStartedSpeakingEvent
- from .agent_v1_warning_event import AgentV1WarningEvent
- from .agent_v1_welcome_message import AgentV1WelcomeMessage
- from .listen_v1_metadata_event import ListenV1MetadataEvent
- from .listen_v1_results_event import ListenV1ResultsEvent
- from .listen_v1_speech_started_event import ListenV1SpeechStartedEvent
- from .listen_v1_utterance_end_event import ListenV1UtteranceEndEvent
- from .listen_v2_connected_event import ListenV2ConnectedEvent
- from .listen_v2_fatal_error_event import ListenV2FatalErrorEvent
- from .listen_v2_turn_info_event import ListenV2TurnInfoEvent
- from .speak_v1_audio_chunk_event import SpeakV1AudioChunkEvent
- from .speak_v1_control_event import SpeakV1ControlEvent
- from .speak_v1_metadata_event import SpeakV1MetadataEvent
- from .speak_v1_warning_event import SpeakV1WarningEvent
-
-# Speak socket client can receive these message types (including binary audio)
-# Import the actual types for proper resolution
-from .speak_v1_audio_chunk_event import SpeakV1AudioChunkEvent
-from .speak_v1_control_event import SpeakV1ControlEvent
-from .speak_v1_metadata_event import SpeakV1MetadataEvent
-from .speak_v1_warning_event import SpeakV1WarningEvent
-
-SpeakV1SocketClientResponse = typing.Union[
- SpeakV1AudioChunkEvent, # Binary audio data
- SpeakV1MetadataEvent, # JSON metadata
- SpeakV1ControlEvent, # JSON control responses (Flushed, Cleared)
- SpeakV1WarningEvent, # JSON warnings
- bytes, # Raw binary audio chunks
-]
-
-# Listen socket client only receives JSON events
-# Import the actual types for proper resolution
-from .listen_v1_metadata_event import ListenV1MetadataEvent
-from .listen_v1_results_event import ListenV1ResultsEvent
-from .listen_v1_speech_started_event import ListenV1SpeechStartedEvent
-from .listen_v1_utterance_end_event import ListenV1UtteranceEndEvent
-
-ListenV1SocketClientResponse = typing.Union[
- ListenV1ResultsEvent,
- ListenV1MetadataEvent,
- ListenV1UtteranceEndEvent,
- ListenV1SpeechStartedEvent,
-]
-
-# Listen V2 socket client receives JSON events
-# Import the actual types for proper resolution
-from .listen_v2_connected_event import ListenV2ConnectedEvent
-from .listen_v2_fatal_error_event import ListenV2FatalErrorEvent
-from .listen_v2_turn_info_event import ListenV2TurnInfoEvent
-
-ListenV2SocketClientResponse = typing.Union[
- ListenV2ConnectedEvent,
- ListenV2TurnInfoEvent,
- ListenV2FatalErrorEvent,
-]
-
-# Agent socket client can receive both JSON events and binary audio
-# Import the actual types for proper resolution
-from .agent_v1_agent_audio_done_event import AgentV1AgentAudioDoneEvent
-from .agent_v1_agent_started_speaking_event import AgentV1AgentStartedSpeakingEvent
-from .agent_v1_agent_thinking_event import AgentV1AgentThinkingEvent
-from .agent_v1_audio_chunk_event import AgentV1AudioChunkEvent
-from .agent_v1_conversation_text_event import AgentV1ConversationTextEvent
-from .agent_v1_error_event import AgentV1ErrorEvent
-from .agent_v1_function_call_request_event import AgentV1FunctionCallRequestEvent
-from .agent_v1_function_call_response_message import AgentV1FunctionCallResponseMessage
-from .agent_v1_injection_refused_event import AgentV1InjectionRefusedEvent
-from .agent_v1_prompt_updated_event import AgentV1PromptUpdatedEvent
-from .agent_v1_settings_applied_event import AgentV1SettingsAppliedEvent
-from .agent_v1_settings_message import AgentV1HistoryFunctionCalls, AgentV1HistoryMessage
-from .agent_v1_speak_updated_event import AgentV1SpeakUpdatedEvent
-from .agent_v1_user_started_speaking_event import AgentV1UserStartedSpeakingEvent
-from .agent_v1_warning_event import AgentV1WarningEvent
-from .agent_v1_welcome_message import AgentV1WelcomeMessage
-
-AgentV1SocketClientResponse = typing.Union[
- AgentV1WelcomeMessage,
- AgentV1SettingsAppliedEvent,
- AgentV1HistoryMessage,
- AgentV1HistoryFunctionCalls,
- AgentV1ConversationTextEvent,
- AgentV1UserStartedSpeakingEvent,
- AgentV1AgentThinkingEvent,
- AgentV1FunctionCallRequestEvent,
- AgentV1FunctionCallResponseMessage, # Bidirectional: Server β Client function responses
- AgentV1AgentStartedSpeakingEvent,
- AgentV1AgentAudioDoneEvent,
- AgentV1PromptUpdatedEvent,
- AgentV1SpeakUpdatedEvent,
- AgentV1InjectionRefusedEvent,
- AgentV1ErrorEvent,
- AgentV1WarningEvent,
- AgentV1AudioChunkEvent, # Binary audio data
- bytes, # Raw binary audio chunks
-]
-
-# Backward compatibility aliases
-SpeakSocketClientResponse = SpeakV1SocketClientResponse
-ListenSocketClientResponse = ListenV1SocketClientResponse
-AgentSocketClientResponse = AgentV1SocketClientResponse
diff --git a/src/deepgram/extensions/types/sockets/speak_v1_audio_chunk_event.py b/src/deepgram/extensions/types/sockets/speak_v1_audio_chunk_event.py
deleted file mode 100644
index 12f21cde..00000000
--- a/src/deepgram/extensions/types/sockets/speak_v1_audio_chunk_event.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Speak V1 Audio Chunk Event - protected from auto-generation
-
-# This represents binary audio data received from the WebSocket
-# The actual data is bytes, but we define this as a type alias for clarity
-SpeakV1AudioChunkEvent = bytes
-"""
-Audio data in the format specified by the request parameters.
-Content-Type: application/octet-stream
-"""
diff --git a/src/deepgram/extensions/types/sockets/speak_v1_metadata_event.py b/src/deepgram/extensions/types/sockets/speak_v1_metadata_event.py
deleted file mode 100644
index a8f16f41..00000000
--- a/src/deepgram/extensions/types/sockets/speak_v1_metadata_event.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Speak V1 Metadata Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class SpeakV1MetadataEvent(UniversalBaseModel):
- """
- Metadata sent after the WebSocket handshake
- """
-
- type: typing.Literal["Metadata"]
- """Message type identifier"""
-
- request_id: str
- """Unique identifier for the request"""
-
- model_name: str
- """Name of the model being used"""
-
- model_version: str
- """Version of the model being used"""
-
- model_uuid: str
- """Unique identifier for the model"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/deepgram/extensions/types/sockets/speak_v1_warning_event.py b/src/deepgram/extensions/types/sockets/speak_v1_warning_event.py
deleted file mode 100644
index e5072763..00000000
--- a/src/deepgram/extensions/types/sockets/speak_v1_warning_event.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Speak V1 Warning Event - protected from auto-generation
-
-import typing
-
-import pydantic
-from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-
-
-class SpeakV1WarningEvent(UniversalBaseModel):
- """
- Warning event from the TTS WebSocket
- """
-
- type: typing.Literal["Warning"]
- """Message type identifier"""
-
- description: str
- """A description of what went wrong"""
-
- code: str
- """Error code identifying the type of error"""
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/__init__.py b/src/deepgram/listen/__init__.py
index 6186f5b4..0e163447 100644
--- a/src/deepgram/listen/__init__.py
+++ b/src/deepgram/listen/__init__.py
@@ -7,7 +7,92 @@
if typing.TYPE_CHECKING:
from . import v1, v2
-_dynamic_imports: typing.Dict[str, str] = {"v1": ".v1", "v2": ".v2"}
+ from .v1 import (
+ ListenV1CloseStream,
+ ListenV1CloseStreamParams,
+ ListenV1CloseStreamType,
+ ListenV1Finalize,
+ ListenV1FinalizeParams,
+ ListenV1FinalizeType,
+ ListenV1KeepAlive,
+ ListenV1KeepAliveParams,
+ ListenV1KeepAliveType,
+ ListenV1Metadata,
+ ListenV1MetadataParams,
+ ListenV1Results,
+ ListenV1ResultsChannel,
+ ListenV1ResultsChannelAlternativesItem,
+ ListenV1ResultsChannelAlternativesItemParams,
+ ListenV1ResultsChannelAlternativesItemWordsItem,
+ ListenV1ResultsChannelAlternativesItemWordsItemParams,
+ ListenV1ResultsChannelParams,
+ ListenV1ResultsMetadata,
+ ListenV1ResultsMetadataModelInfo,
+ ListenV1ResultsMetadataModelInfoParams,
+ ListenV1ResultsMetadataParams,
+ ListenV1ResultsParams,
+ ListenV1SpeechStarted,
+ ListenV1SpeechStartedParams,
+ ListenV1UtteranceEnd,
+ ListenV1UtteranceEndParams,
+ )
+ from .v2 import (
+ ListenV2CloseStream,
+ ListenV2CloseStreamParams,
+ ListenV2CloseStreamType,
+ ListenV2Connected,
+ ListenV2ConnectedParams,
+ ListenV2FatalError,
+ ListenV2FatalErrorParams,
+ ListenV2TurnInfo,
+ ListenV2TurnInfoEvent,
+ ListenV2TurnInfoParams,
+ ListenV2TurnInfoWordsItem,
+ ListenV2TurnInfoWordsItemParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStream": ".v1",
+ "ListenV1CloseStreamParams": ".v1",
+ "ListenV1CloseStreamType": ".v1",
+ "ListenV1Finalize": ".v1",
+ "ListenV1FinalizeParams": ".v1",
+ "ListenV1FinalizeType": ".v1",
+ "ListenV1KeepAlive": ".v1",
+ "ListenV1KeepAliveParams": ".v1",
+ "ListenV1KeepAliveType": ".v1",
+ "ListenV1Metadata": ".v1",
+ "ListenV1MetadataParams": ".v1",
+ "ListenV1Results": ".v1",
+ "ListenV1ResultsChannel": ".v1",
+ "ListenV1ResultsChannelAlternativesItem": ".v1",
+ "ListenV1ResultsChannelAlternativesItemParams": ".v1",
+ "ListenV1ResultsChannelAlternativesItemWordsItem": ".v1",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams": ".v1",
+ "ListenV1ResultsChannelParams": ".v1",
+ "ListenV1ResultsMetadata": ".v1",
+ "ListenV1ResultsMetadataModelInfo": ".v1",
+ "ListenV1ResultsMetadataModelInfoParams": ".v1",
+ "ListenV1ResultsMetadataParams": ".v1",
+ "ListenV1ResultsParams": ".v1",
+ "ListenV1SpeechStarted": ".v1",
+ "ListenV1SpeechStartedParams": ".v1",
+ "ListenV1UtteranceEnd": ".v1",
+ "ListenV1UtteranceEndParams": ".v1",
+ "ListenV2CloseStream": ".v2",
+ "ListenV2CloseStreamParams": ".v2",
+ "ListenV2CloseStreamType": ".v2",
+ "ListenV2Connected": ".v2",
+ "ListenV2ConnectedParams": ".v2",
+ "ListenV2FatalError": ".v2",
+ "ListenV2FatalErrorParams": ".v2",
+ "ListenV2TurnInfo": ".v2",
+ "ListenV2TurnInfoEvent": ".v2",
+ "ListenV2TurnInfoParams": ".v2",
+ "ListenV2TurnInfoWordsItem": ".v2",
+ "ListenV2TurnInfoWordsItemParams": ".v2",
+ "v1": ".v1",
+ "v2": ".v2",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +116,46 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["v1", "v2"]
+__all__ = [
+ "ListenV1CloseStream",
+ "ListenV1CloseStreamParams",
+ "ListenV1CloseStreamType",
+ "ListenV1Finalize",
+ "ListenV1FinalizeParams",
+ "ListenV1FinalizeType",
+ "ListenV1KeepAlive",
+ "ListenV1KeepAliveParams",
+ "ListenV1KeepAliveType",
+ "ListenV1Metadata",
+ "ListenV1MetadataParams",
+ "ListenV1Results",
+ "ListenV1ResultsChannel",
+ "ListenV1ResultsChannelAlternativesItem",
+ "ListenV1ResultsChannelAlternativesItemParams",
+ "ListenV1ResultsChannelAlternativesItemWordsItem",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams",
+ "ListenV1ResultsChannelParams",
+ "ListenV1ResultsMetadata",
+ "ListenV1ResultsMetadataModelInfo",
+ "ListenV1ResultsMetadataModelInfoParams",
+ "ListenV1ResultsMetadataParams",
+ "ListenV1ResultsParams",
+ "ListenV1SpeechStarted",
+ "ListenV1SpeechStartedParams",
+ "ListenV1UtteranceEnd",
+ "ListenV1UtteranceEndParams",
+ "ListenV2CloseStream",
+ "ListenV2CloseStreamParams",
+ "ListenV2CloseStreamType",
+ "ListenV2Connected",
+ "ListenV2ConnectedParams",
+ "ListenV2FatalError",
+ "ListenV2FatalErrorParams",
+ "ListenV2TurnInfo",
+ "ListenV2TurnInfoEvent",
+ "ListenV2TurnInfoParams",
+ "ListenV2TurnInfoWordsItem",
+ "ListenV2TurnInfoWordsItemParams",
+ "v1",
+ "v2",
+]
diff --git a/src/deepgram/listen/client.py b/src/deepgram/listen/client.py
index 8475d7f0..8a81d4ea 100644
--- a/src/deepgram/listen/client.py
+++ b/src/deepgram/listen/client.py
@@ -9,14 +9,13 @@
if typing.TYPE_CHECKING:
from .v1.client import AsyncV1Client, V1Client
- from .v2.client import AsyncV2Client, V2Client
+
class ListenClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._raw_client = RawListenClient(client_wrapper=client_wrapper)
self._client_wrapper = client_wrapper
self._v1: typing.Optional[V1Client] = None
- self._v2: typing.Optional[V2Client] = None
@property
def with_raw_response(self) -> RawListenClient:
@@ -37,22 +36,12 @@ def v1(self):
self._v1 = V1Client(client_wrapper=self._client_wrapper)
return self._v1
- # TODO: Manual workaround due to fern generator bug
- @property
- def v2(self):
- if self._v2 is None:
- from .v2.client import V2Client # noqa: E402
-
- self._v2 = V2Client(client_wrapper=self._client_wrapper)
- return self._v2
-
class AsyncListenClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._raw_client = AsyncRawListenClient(client_wrapper=client_wrapper)
self._client_wrapper = client_wrapper
self._v1: typing.Optional[AsyncV1Client] = None
- self._v2: typing.Optional[AsyncV2Client] = None
@property
def with_raw_response(self) -> AsyncRawListenClient:
@@ -72,12 +61,3 @@ def v1(self):
self._v1 = AsyncV1Client(client_wrapper=self._client_wrapper)
return self._v1
-
- # TODO: Manual workaround due to fern generator bug
- @property
- def v2(self):
- if self._v2 is None:
- from .v2.client import AsyncV2Client # noqa: E402
-
- self._v2 = AsyncV2Client(client_wrapper=self._client_wrapper)
- return self._v2
diff --git a/src/deepgram/listen/v1/__init__.py b/src/deepgram/listen/v1/__init__.py
index a3dcf43f..f19d2fd1 100644
--- a/src/deepgram/listen/v1/__init__.py
+++ b/src/deepgram/listen/v1/__init__.py
@@ -6,6 +6,23 @@
from importlib import import_module
if typing.TYPE_CHECKING:
+ from .types import (
+ ListenV1CloseStream,
+ ListenV1CloseStreamType,
+ ListenV1Finalize,
+ ListenV1FinalizeType,
+ ListenV1KeepAlive,
+ ListenV1KeepAliveType,
+ ListenV1Metadata,
+ ListenV1Results,
+ ListenV1ResultsChannel,
+ ListenV1ResultsChannelAlternativesItem,
+ ListenV1ResultsChannelAlternativesItemWordsItem,
+ ListenV1ResultsMetadata,
+ ListenV1ResultsMetadataModelInfo,
+ ListenV1SpeechStarted,
+ ListenV1UtteranceEnd,
+ )
from . import media
from .media import (
MediaTranscribeRequestCallbackMethod,
@@ -18,7 +35,48 @@
MediaTranscribeResponse,
MediaTranscribeResponseParams,
)
+ from .requests import (
+ ListenV1CloseStreamParams,
+ ListenV1FinalizeParams,
+ ListenV1KeepAliveParams,
+ ListenV1MetadataParams,
+ ListenV1ResultsChannelAlternativesItemParams,
+ ListenV1ResultsChannelAlternativesItemWordsItemParams,
+ ListenV1ResultsChannelParams,
+ ListenV1ResultsMetadataModelInfoParams,
+ ListenV1ResultsMetadataParams,
+ ListenV1ResultsParams,
+ ListenV1SpeechStartedParams,
+ ListenV1UtteranceEndParams,
+ )
_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStream": ".types",
+ "ListenV1CloseStreamParams": ".requests",
+ "ListenV1CloseStreamType": ".types",
+ "ListenV1Finalize": ".types",
+ "ListenV1FinalizeParams": ".requests",
+ "ListenV1FinalizeType": ".types",
+ "ListenV1KeepAlive": ".types",
+ "ListenV1KeepAliveParams": ".requests",
+ "ListenV1KeepAliveType": ".types",
+ "ListenV1Metadata": ".types",
+ "ListenV1MetadataParams": ".requests",
+ "ListenV1Results": ".types",
+ "ListenV1ResultsChannel": ".types",
+ "ListenV1ResultsChannelAlternativesItem": ".types",
+ "ListenV1ResultsChannelAlternativesItemParams": ".requests",
+ "ListenV1ResultsChannelAlternativesItemWordsItem": ".types",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams": ".requests",
+ "ListenV1ResultsChannelParams": ".requests",
+ "ListenV1ResultsMetadata": ".types",
+ "ListenV1ResultsMetadataModelInfo": ".types",
+ "ListenV1ResultsMetadataModelInfoParams": ".requests",
+ "ListenV1ResultsMetadataParams": ".requests",
+ "ListenV1ResultsParams": ".requests",
+ "ListenV1SpeechStarted": ".types",
+ "ListenV1SpeechStartedParams": ".requests",
+ "ListenV1UtteranceEnd": ".types",
+ "ListenV1UtteranceEndParams": ".requests",
"MediaTranscribeRequestCallbackMethod": ".media",
"MediaTranscribeRequestCustomIntentMode": ".media",
"MediaTranscribeRequestCustomTopicMode": ".media",
@@ -54,6 +112,33 @@ def __dir__():
__all__ = [
+ "ListenV1CloseStream",
+ "ListenV1CloseStreamParams",
+ "ListenV1CloseStreamType",
+ "ListenV1Finalize",
+ "ListenV1FinalizeParams",
+ "ListenV1FinalizeType",
+ "ListenV1KeepAlive",
+ "ListenV1KeepAliveParams",
+ "ListenV1KeepAliveType",
+ "ListenV1Metadata",
+ "ListenV1MetadataParams",
+ "ListenV1Results",
+ "ListenV1ResultsChannel",
+ "ListenV1ResultsChannelAlternativesItem",
+ "ListenV1ResultsChannelAlternativesItemParams",
+ "ListenV1ResultsChannelAlternativesItemWordsItem",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams",
+ "ListenV1ResultsChannelParams",
+ "ListenV1ResultsMetadata",
+ "ListenV1ResultsMetadataModelInfo",
+ "ListenV1ResultsMetadataModelInfoParams",
+ "ListenV1ResultsMetadataParams",
+ "ListenV1ResultsParams",
+ "ListenV1SpeechStarted",
+ "ListenV1SpeechStartedParams",
+ "ListenV1UtteranceEnd",
+ "ListenV1UtteranceEndParams",
"MediaTranscribeRequestCallbackMethod",
"MediaTranscribeRequestCustomIntentMode",
"MediaTranscribeRequestCustomTopicMode",
diff --git a/src/deepgram/listen/v1/media/client.py b/src/deepgram/listen/v1/media/client.py
index 047dfac4..ab81ae3b 100644
--- a/src/deepgram/listen/v1/media/client.py
+++ b/src/deepgram/listen/v1/media/client.py
@@ -206,6 +206,41 @@ def transcribe_url(
api_key="YOUR_API_KEY",
)
client.listen.v1.media.transcribe_url(
+ callback="callback",
+ callback_method="POST",
+ extra="extra",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding="linear16",
+ filler_words=True,
+ keywords="keywords",
+ language="language",
+ measurements=True,
+ model="nova-3",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact="redact",
+ replace="replace",
+ search="search",
+ smart_format=True,
+ utterances=True,
+ utt_split=1.1,
+ version="latest",
+ mip_opt_out=True,
url="https://dpgr.am/spacewalk.wav",
)
"""
@@ -661,6 +696,41 @@ async def transcribe_url(
async def main() -> None:
await client.listen.v1.media.transcribe_url(
+ callback="callback",
+ callback_method="POST",
+ extra="extra",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding="linear16",
+ filler_words=True,
+ keywords="keywords",
+ language="language",
+ measurements=True,
+ model="nova-3",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact="redact",
+ replace="replace",
+ search="search",
+ smart_format=True,
+ utterances=True,
+ utt_split=1.1,
+ version="latest",
+ mip_opt_out=True,
url="https://dpgr.am/spacewalk.wav",
)
diff --git a/src/deepgram/listen/v1/requests/__init__.py b/src/deepgram/listen/v1/requests/__init__.py
new file mode 100644
index 00000000..519d1bb8
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/__init__.py
@@ -0,0 +1,73 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v1close_stream import ListenV1CloseStreamParams
+ from .listen_v1finalize import ListenV1FinalizeParams
+ from .listen_v1keep_alive import ListenV1KeepAliveParams
+ from .listen_v1metadata import ListenV1MetadataParams
+ from .listen_v1results import ListenV1ResultsParams
+ from .listen_v1results_channel import ListenV1ResultsChannelParams
+ from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItemParams
+ from .listen_v1results_channel_alternatives_item_words_item import (
+ ListenV1ResultsChannelAlternativesItemWordsItemParams,
+ )
+ from .listen_v1results_metadata import ListenV1ResultsMetadataParams
+ from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfoParams
+ from .listen_v1speech_started import ListenV1SpeechStartedParams
+ from .listen_v1utterance_end import ListenV1UtteranceEndParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStreamParams": ".listen_v1close_stream",
+ "ListenV1FinalizeParams": ".listen_v1finalize",
+ "ListenV1KeepAliveParams": ".listen_v1keep_alive",
+ "ListenV1MetadataParams": ".listen_v1metadata",
+ "ListenV1ResultsChannelAlternativesItemParams": ".listen_v1results_channel_alternatives_item",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams": ".listen_v1results_channel_alternatives_item_words_item",
+ "ListenV1ResultsChannelParams": ".listen_v1results_channel",
+ "ListenV1ResultsMetadataModelInfoParams": ".listen_v1results_metadata_model_info",
+ "ListenV1ResultsMetadataParams": ".listen_v1results_metadata",
+ "ListenV1ResultsParams": ".listen_v1results",
+ "ListenV1SpeechStartedParams": ".listen_v1speech_started",
+ "ListenV1UtteranceEndParams": ".listen_v1utterance_end",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV1CloseStreamParams",
+ "ListenV1FinalizeParams",
+ "ListenV1KeepAliveParams",
+ "ListenV1MetadataParams",
+ "ListenV1ResultsChannelAlternativesItemParams",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams",
+ "ListenV1ResultsChannelParams",
+ "ListenV1ResultsMetadataModelInfoParams",
+ "ListenV1ResultsMetadataParams",
+ "ListenV1ResultsParams",
+ "ListenV1SpeechStartedParams",
+ "ListenV1UtteranceEndParams",
+]
diff --git a/src/deepgram/listen/v1/requests/listen_v1close_stream.py b/src/deepgram/listen/v1/requests/listen_v1close_stream.py
new file mode 100644
index 00000000..c75ad0e1
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1close_stream.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v1close_stream_type import ListenV1CloseStreamType
+
+
+class ListenV1CloseStreamParams(typing_extensions.TypedDict):
+ type: ListenV1CloseStreamType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1finalize.py b/src/deepgram/listen/v1/requests/listen_v1finalize.py
new file mode 100644
index 00000000..8dd6d16e
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1finalize.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v1finalize_type import ListenV1FinalizeType
+
+
+class ListenV1FinalizeParams(typing_extensions.TypedDict):
+ type: ListenV1FinalizeType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1keep_alive.py b/src/deepgram/listen/v1/requests/listen_v1keep_alive.py
new file mode 100644
index 00000000..b40242bd
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1keep_alive.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v1keep_alive_type import ListenV1KeepAliveType
+
+
+class ListenV1KeepAliveParams(typing_extensions.TypedDict):
+ type: ListenV1KeepAliveType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1metadata.py b/src/deepgram/listen/v1/requests/listen_v1metadata.py
new file mode 100644
index 00000000..2b648b48
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1metadata.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV1MetadataParams(typing_extensions.TypedDict):
+ type: typing.Literal["Metadata"]
+ """
+ Message type identifier
+ """
+
+ transaction_key: str
+ """
+ The transaction key
+ """
+
+ request_id: str
+ """
+ The request ID
+ """
+
+ sha256: str
+ """
+ The sha256
+ """
+
+ created: str
+ """
+ The created
+ """
+
+ duration: float
+ """
+ The duration
+ """
+
+ channels: float
+ """
+ The channels
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results.py b/src/deepgram/listen/v1/requests/listen_v1results.py
new file mode 100644
index 00000000..ad57f7c5
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .listen_v1results_channel import ListenV1ResultsChannelParams
+from .listen_v1results_metadata import ListenV1ResultsMetadataParams
+
+
+class ListenV1ResultsParams(typing_extensions.TypedDict):
+ type: typing.Literal["Results"]
+ """
+ Message type identifier
+ """
+
+ channel_index: typing.Sequence[float]
+ """
+ The index of the channel
+ """
+
+ duration: float
+ """
+ The duration of the transcription
+ """
+
+ start: float
+ """
+ The start time of the transcription
+ """
+
+ is_final: typing_extensions.NotRequired[bool]
+ """
+ Whether the transcription is final
+ """
+
+ speech_final: typing_extensions.NotRequired[bool]
+ """
+ Whether the transcription is speech final
+ """
+
+ channel: ListenV1ResultsChannelParams
+ metadata: ListenV1ResultsMetadataParams
+ from_finalize: typing_extensions.NotRequired[bool]
+ """
+ Whether the transcription is from a finalize message
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_channel.py b/src/deepgram/listen/v1/requests/listen_v1results_channel.py
new file mode 100644
index 00000000..f27e364d
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_channel.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItemParams
+
+
+class ListenV1ResultsChannelParams(typing_extensions.TypedDict):
+ alternatives: typing.Sequence[ListenV1ResultsChannelAlternativesItemParams]
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item.py b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item.py
new file mode 100644
index 00000000..5e52e389
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .listen_v1results_channel_alternatives_item_words_item import ListenV1ResultsChannelAlternativesItemWordsItemParams
+
+
+class ListenV1ResultsChannelAlternativesItemParams(typing_extensions.TypedDict):
+ transcript: str
+ """
+ The transcript of the transcription
+ """
+
+ confidence: float
+ """
+ The confidence of the transcription
+ """
+
+ languages: typing.Sequence[str]
+ words: typing.Sequence[ListenV1ResultsChannelAlternativesItemWordsItemParams]
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item_words_item.py b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item_words_item.py
new file mode 100644
index 00000000..6252f31b
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item_words_item.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV1ResultsChannelAlternativesItemWordsItemParams(typing_extensions.TypedDict):
+ word: str
+ """
+ The word of the transcription
+ """
+
+ start: float
+ """
+ The start time of the word
+ """
+
+ end: float
+ """
+ The end time of the word
+ """
+
+ confidence: float
+ """
+ The confidence of the word
+ """
+
+ language: str
+ """
+ The language of the word
+ """
+
+ punctuated_word: str
+ """
+ The punctuated word of the word
+ """
+
+ speaker: typing_extensions.NotRequired[float]
+ """
+ The speaker of the word
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_metadata.py b/src/deepgram/listen/v1/requests/listen_v1results_metadata.py
new file mode 100644
index 00000000..fb5037c8
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_metadata.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfoParams
+
+
+class ListenV1ResultsMetadataParams(typing_extensions.TypedDict):
+ request_id: str
+ """
+ The request ID
+ """
+
+ model_info: ListenV1ResultsMetadataModelInfoParams
+ model_uuid: str
+ """
+ The model UUID
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_metadata_model_info.py b/src/deepgram/listen/v1/requests/listen_v1results_metadata_model_info.py
new file mode 100644
index 00000000..f953fdce
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_metadata_model_info.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV1ResultsMetadataModelInfoParams(typing_extensions.TypedDict):
+ name: str
+ """
+ The name of the model
+ """
+
+ version: str
+ """
+ The version of the model
+ """
+
+ arch: str
+ """
+ The arch of the model
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1speech_started.py b/src/deepgram/listen/v1/requests/listen_v1speech_started.py
new file mode 100644
index 00000000..1cc1dcfa
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1speech_started.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV1SpeechStartedParams(typing_extensions.TypedDict):
+ type: typing.Literal["SpeechStarted"]
+ """
+ Message type identifier
+ """
+
+ channel: typing.Sequence[float]
+ """
+ The channel
+ """
+
+ timestamp: float
+ """
+ The timestamp
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1utterance_end.py b/src/deepgram/listen/v1/requests/listen_v1utterance_end.py
new file mode 100644
index 00000000..37ae57b8
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1utterance_end.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV1UtteranceEndParams(typing_extensions.TypedDict):
+ type: typing.Literal["UtteranceEnd"]
+ """
+ Message type identifier
+ """
+
+ channel: typing.Sequence[float]
+ """
+ The channel
+ """
+
+ last_word_end: float
+ """
+ The last word end
+ """
diff --git a/src/deepgram/listen/v1/socket_client.py b/src/deepgram/listen/v1/socket_client.py
index 10ea9759..f34f6511 100644
--- a/src/deepgram/listen/v1/socket_client.py
+++ b/src/deepgram/listen/v1/socket_client.py
@@ -1,5 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
-# Enhanced with binary message support, comprehensive socket types, and send methods.
import json
import typing
@@ -9,29 +8,20 @@
import websockets.sync.connection as websockets_sync_connection
from ...core.events import EventEmitterMixin, EventType
from ...core.pydantic_utilities import parse_obj_as
+from .types.listen_v1close_stream import ListenV1CloseStream
+from .types.listen_v1finalize import ListenV1Finalize
+from .types.listen_v1keep_alive import ListenV1KeepAlive
+from .types.listen_v1metadata import ListenV1Metadata
+from .types.listen_v1results import ListenV1Results
+from .types.listen_v1speech_started import ListenV1SpeechStarted
+from .types.listen_v1utterance_end import ListenV1UtteranceEnd
try:
from websockets.legacy.client import WebSocketClientProtocol # type: ignore
except ImportError:
from websockets import WebSocketClientProtocol # type: ignore
-# Socket message types
-from ...extensions.types.sockets import (
- ListenV1ControlMessage,
- ListenV1MediaMessage,
- ListenV1MetadataEvent,
- ListenV1ResultsEvent,
- ListenV1SpeechStartedEvent,
- ListenV1UtteranceEndEvent,
-)
-
-# Response union type (Listen only receives JSON events)
-V1SocketClientResponse = typing.Union[
- ListenV1ResultsEvent,
- ListenV1MetadataEvent,
- ListenV1UtteranceEndEvent,
- ListenV1SpeechStartedEvent,
-]
+V1SocketClientResponse = typing.Union[ListenV1Results, ListenV1Metadata, ListenV1UtteranceEnd, ListenV1SpeechStarted]
class AsyncV1SocketClient(EventEmitterMixin):
@@ -39,37 +29,13 @@ def __init__(self, *, websocket: WebSocketClientProtocol):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
async def __aiter__(self):
async for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V1SocketClientResponse, json.loads(message)) # type: ignore
async def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages.
Emits events in the following order:
- EventType.OPEN when connection is established
@@ -80,48 +46,63 @@ async def start_listening(self):
await self._emit_async(EventType.OPEN, None)
try:
async for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
await self._emit_async(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- await self._emit_async(EventType.ERROR, exc)
+ await self._emit_async(EventType.ERROR, exc)
finally:
await self._emit_async(EventType.CLOSE, None)
+ async def send_listen_v_1_media(self, message: str) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a str.
+ """
+ await self._send_model(message)
+
+ async def send_listen_v_1_finalize(self, message: ListenV1Finalize) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV1Finalize.
+ """
+ await self._send_model(message)
+
+ async def send_listen_v_1_close_stream(self, message: ListenV1CloseStream) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV1CloseStream.
+ """
+ await self._send_model(message)
+
+ async def send_listen_v_1_keep_alive(self, message: ListenV1KeepAlive) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV1KeepAlive.
+ """
+ await self._send_model(message)
+
async def recv(self) -> V1SocketClientResponse:
"""
Receive a message from the websocket connection.
"""
data = await self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ json_data = json.loads(data)
+ return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
async def _send(self, data: typing.Any) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
"""
- if isinstance(data, (bytes, bytearray)):
- await self._websocket.send(data)
- elif isinstance(data, dict):
- await self._websocket.send(json.dumps(data))
- else:
- await self._websocket.send(data)
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ await self._websocket.send(data)
async def _send_model(self, data: typing.Any) -> None:
"""
Send a Pydantic model to the websocket connection.
"""
- await self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- async def send_control(self, message: ListenV1ControlMessage) -> None:
- """Send a control message (keep_alive, finalize, etc.)."""
- await self._send_model(message)
-
- async def send_media(self, message: ListenV1MediaMessage) -> None:
- """Send binary audio data for transcription."""
- await self._send(message)
+ await self._send(data.dict())
class V1SocketClient(EventEmitterMixin):
@@ -129,37 +110,13 @@ def __init__(self, *, websocket: websockets_sync_connection.Connection):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
def __iter__(self):
for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V1SocketClientResponse, json.loads(message)) # type: ignore
def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages.
Emits events in the following order:
- EventType.OPEN when connection is established
@@ -170,45 +127,60 @@ def start_listening(self):
self._emit(EventType.OPEN, None)
try:
for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
self._emit(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- self._emit(EventType.ERROR, exc)
+ self._emit(EventType.ERROR, exc)
finally:
self._emit(EventType.CLOSE, None)
+ def send_listen_v_1_media(self, message: str) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a str.
+ """
+ self._send_model(message)
+
+ def send_listen_v_1_finalize(self, message: ListenV1Finalize) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV1Finalize.
+ """
+ self._send_model(message)
+
+ def send_listen_v_1_close_stream(self, message: ListenV1CloseStream) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV1CloseStream.
+ """
+ self._send_model(message)
+
+ def send_listen_v_1_keep_alive(self, message: ListenV1KeepAlive) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV1KeepAlive.
+ """
+ self._send_model(message)
+
def recv(self) -> V1SocketClientResponse:
"""
Receive a message from the websocket connection.
"""
data = self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ json_data = json.loads(data)
+ return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
def _send(self, data: typing.Any) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
"""
- if isinstance(data, (bytes, bytearray)):
- self._websocket.send(data)
- elif isinstance(data, dict):
- self._websocket.send(json.dumps(data))
- else:
- self._websocket.send(data)
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ self._websocket.send(data)
def _send_model(self, data: typing.Any) -> None:
"""
Send a Pydantic model to the websocket connection.
"""
- self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- def send_control(self, message: ListenV1ControlMessage) -> None:
- """Send a control message (keep_alive, finalize, etc.)."""
- self._send_model(message)
-
- def send_media(self, message: ListenV1MediaMessage) -> None:
- """Send binary audio data for transcription."""
- self._send(message)
+ self._send(data.dict())
diff --git a/src/deepgram/listen/v1/types/__init__.py b/src/deepgram/listen/v1/types/__init__.py
new file mode 100644
index 00000000..2168d44b
--- /dev/null
+++ b/src/deepgram/listen/v1/types/__init__.py
@@ -0,0 +1,80 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v1close_stream import ListenV1CloseStream
+ from .listen_v1close_stream_type import ListenV1CloseStreamType
+ from .listen_v1finalize import ListenV1Finalize
+ from .listen_v1finalize_type import ListenV1FinalizeType
+ from .listen_v1keep_alive import ListenV1KeepAlive
+ from .listen_v1keep_alive_type import ListenV1KeepAliveType
+ from .listen_v1metadata import ListenV1Metadata
+ from .listen_v1results import ListenV1Results
+ from .listen_v1results_channel import ListenV1ResultsChannel
+ from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItem
+ from .listen_v1results_channel_alternatives_item_words_item import ListenV1ResultsChannelAlternativesItemWordsItem
+ from .listen_v1results_metadata import ListenV1ResultsMetadata
+ from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfo
+ from .listen_v1speech_started import ListenV1SpeechStarted
+ from .listen_v1utterance_end import ListenV1UtteranceEnd
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStream": ".listen_v1close_stream",
+ "ListenV1CloseStreamType": ".listen_v1close_stream_type",
+ "ListenV1Finalize": ".listen_v1finalize",
+ "ListenV1FinalizeType": ".listen_v1finalize_type",
+ "ListenV1KeepAlive": ".listen_v1keep_alive",
+ "ListenV1KeepAliveType": ".listen_v1keep_alive_type",
+ "ListenV1Metadata": ".listen_v1metadata",
+ "ListenV1Results": ".listen_v1results",
+ "ListenV1ResultsChannel": ".listen_v1results_channel",
+ "ListenV1ResultsChannelAlternativesItem": ".listen_v1results_channel_alternatives_item",
+ "ListenV1ResultsChannelAlternativesItemWordsItem": ".listen_v1results_channel_alternatives_item_words_item",
+ "ListenV1ResultsMetadata": ".listen_v1results_metadata",
+ "ListenV1ResultsMetadataModelInfo": ".listen_v1results_metadata_model_info",
+ "ListenV1SpeechStarted": ".listen_v1speech_started",
+ "ListenV1UtteranceEnd": ".listen_v1utterance_end",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV1CloseStream",
+ "ListenV1CloseStreamType",
+ "ListenV1Finalize",
+ "ListenV1FinalizeType",
+ "ListenV1KeepAlive",
+ "ListenV1KeepAliveType",
+ "ListenV1Metadata",
+ "ListenV1Results",
+ "ListenV1ResultsChannel",
+ "ListenV1ResultsChannelAlternativesItem",
+ "ListenV1ResultsChannelAlternativesItemWordsItem",
+ "ListenV1ResultsMetadata",
+ "ListenV1ResultsMetadataModelInfo",
+ "ListenV1SpeechStarted",
+ "ListenV1UtteranceEnd",
+]
diff --git a/src/deepgram/listen/v1/types/listen_v1close_stream.py b/src/deepgram/listen/v1/types/listen_v1close_stream.py
new file mode 100644
index 00000000..6c11646f
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1close_stream.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1close_stream_type import ListenV1CloseStreamType
+
+
+class ListenV1CloseStream(UniversalBaseModel):
+ type: ListenV1CloseStreamType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1close_stream_type.py b/src/deepgram/listen/v1/types/listen_v1close_stream_type.py
new file mode 100644
index 00000000..e5332dfd
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1close_stream_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1CloseStreamType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v1/types/listen_v1finalize.py b/src/deepgram/listen/v1/types/listen_v1finalize.py
new file mode 100644
index 00000000..ffd48baa
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1finalize.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1finalize_type import ListenV1FinalizeType
+
+
+class ListenV1Finalize(UniversalBaseModel):
+ type: ListenV1FinalizeType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1finalize_type.py b/src/deepgram/listen/v1/types/listen_v1finalize_type.py
new file mode 100644
index 00000000..c8e1de82
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1finalize_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1FinalizeType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v1/types/listen_v1keep_alive.py b/src/deepgram/listen/v1/types/listen_v1keep_alive.py
new file mode 100644
index 00000000..96d3e67a
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1keep_alive.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1keep_alive_type import ListenV1KeepAliveType
+
+
+class ListenV1KeepAlive(UniversalBaseModel):
+ type: ListenV1KeepAliveType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1keep_alive_type.py b/src/deepgram/listen/v1/types/listen_v1keep_alive_type.py
new file mode 100644
index 00000000..36b22ae4
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1keep_alive_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1KeepAliveType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v1/types/listen_v1metadata.py b/src/deepgram/listen/v1/types/listen_v1metadata.py
new file mode 100644
index 00000000..d86a5253
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1metadata.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1Metadata(UniversalBaseModel):
+ type: typing.Literal["Metadata"] = pydantic.Field(default="Metadata")
+ """
+ Message type identifier
+ """
+
+ transaction_key: str = pydantic.Field()
+ """
+ The transaction key
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ The request ID
+ """
+
+ sha256: str = pydantic.Field()
+ """
+ The sha256
+ """
+
+ created: str = pydantic.Field()
+ """
+ The creation timestamp
+ """
+
+ duration: float = pydantic.Field()
+ """
+ The duration
+ """
+
+ channels: float = pydantic.Field()
+ """
+ The channels
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results.py b/src/deepgram/listen/v1/types/listen_v1results.py
new file mode 100644
index 00000000..d86fb5c0
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_channel import ListenV1ResultsChannel
+from .listen_v1results_metadata import ListenV1ResultsMetadata
+
+
+class ListenV1Results(UniversalBaseModel):
+ type: typing.Literal["Results"] = pydantic.Field(default="Results")
+ """
+ Message type identifier
+ """
+
+ channel_index: typing.List[float] = pydantic.Field()
+ """
+ The index of the channel
+ """
+
+ duration: float = pydantic.Field()
+ """
+ The duration of the transcription
+ """
+
+ start: float = pydantic.Field()
+ """
+ The start time of the transcription
+ """
+
+ is_final: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the transcription is final
+ """
+
+ speech_final: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the transcription is speech final
+ """
+
+ channel: ListenV1ResultsChannel
+ metadata: ListenV1ResultsMetadata
+ from_finalize: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the transcription is from a finalize message
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_channel.py b/src/deepgram/listen/v1/types/listen_v1results_channel.py
new file mode 100644
index 00000000..ce58c5f3
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_channel.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItem
+
+
+class ListenV1ResultsChannel(UniversalBaseModel):
+ alternatives: typing.List[ListenV1ResultsChannelAlternativesItem]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item.py b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item.py
new file mode 100644
index 00000000..d4be6fa0
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_channel_alternatives_item_words_item import ListenV1ResultsChannelAlternativesItemWordsItem
+
+
+class ListenV1ResultsChannelAlternativesItem(UniversalBaseModel):
+ transcript: str = pydantic.Field()
+ """
+ The transcript of the transcription
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ The confidence of the transcription
+ """
+
+ languages: typing.List[str]
+ words: typing.List[ListenV1ResultsChannelAlternativesItemWordsItem]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item_words_item.py b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item_words_item.py
new file mode 100644
index 00000000..0a490289
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item_words_item.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1ResultsChannelAlternativesItemWordsItem(UniversalBaseModel):
+ word: str = pydantic.Field()
+ """
+ The word of the transcription
+ """
+
+ start: float = pydantic.Field()
+ """
+ The start time of the word
+ """
+
+ end: float = pydantic.Field()
+ """
+ The end time of the word
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ The confidence of the word
+ """
+
+ language: str = pydantic.Field()
+ """
+ The language of the word
+ """
+
+ punctuated_word: str = pydantic.Field()
+ """
+ The punctuated form of the word
+ """
+
+ speaker: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ The speaker of the word
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_metadata.py b/src/deepgram/listen/v1/types/listen_v1results_metadata.py
new file mode 100644
index 00000000..92518626
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_metadata.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfo
+
+
+class ListenV1ResultsMetadata(UniversalBaseModel):
+ request_id: str = pydantic.Field()
+ """
+ The request ID
+ """
+
+ model_info: ListenV1ResultsMetadataModelInfo
+ model_uuid: str = pydantic.Field()
+ """
+ The model UUID
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_metadata_model_info.py b/src/deepgram/listen/v1/types/listen_v1results_metadata_model_info.py
new file mode 100644
index 00000000..19e04fa8
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_metadata_model_info.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1ResultsMetadataModelInfo(UniversalBaseModel):
+ name: str = pydantic.Field()
+ """
+ The name of the model
+ """
+
+ version: str = pydantic.Field()
+ """
+ The version of the model
+ """
+
+ arch: str = pydantic.Field()
+ """
+ The architecture of the model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1speech_started.py b/src/deepgram/listen/v1/types/listen_v1speech_started.py
new file mode 100644
index 00000000..ce42f749
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1speech_started.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1SpeechStarted(UniversalBaseModel):
+ type: typing.Literal["SpeechStarted"] = pydantic.Field(default="SpeechStarted")
+ """
+ Message type identifier
+ """
+
+ channel: typing.List[float] = pydantic.Field()
+ """
+ The channel
+ """
+
+ timestamp: float = pydantic.Field()
+ """
+ The timestamp
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1utterance_end.py b/src/deepgram/listen/v1/types/listen_v1utterance_end.py
new file mode 100644
index 00000000..39cb1100
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1utterance_end.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1UtteranceEnd(UniversalBaseModel):
+ type: typing.Literal["UtteranceEnd"] = pydantic.Field(default="UtteranceEnd")
+ """
+ Message type identifier
+ """
+
+ channel: typing.List[float] = pydantic.Field()
+ """
+ The channel
+ """
+
+ last_word_end: float = pydantic.Field()
+ """
+ The last word end
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/__init__.py b/src/deepgram/listen/v2/__init__.py
index 5cde0202..db7c724c 100644
--- a/src/deepgram/listen/v2/__init__.py
+++ b/src/deepgram/listen/v2/__init__.py
@@ -2,3 +2,74 @@
# isort: skip_file
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .types import (
+ ListenV2CloseStream,
+ ListenV2CloseStreamType,
+ ListenV2Connected,
+ ListenV2FatalError,
+ ListenV2TurnInfo,
+ ListenV2TurnInfoEvent,
+ ListenV2TurnInfoWordsItem,
+ )
+ from .requests import (
+ ListenV2CloseStreamParams,
+ ListenV2ConnectedParams,
+ ListenV2FatalErrorParams,
+ ListenV2TurnInfoParams,
+ ListenV2TurnInfoWordsItemParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV2CloseStream": ".types",
+ "ListenV2CloseStreamParams": ".requests",
+ "ListenV2CloseStreamType": ".types",
+ "ListenV2Connected": ".types",
+ "ListenV2ConnectedParams": ".requests",
+ "ListenV2FatalError": ".types",
+ "ListenV2FatalErrorParams": ".requests",
+ "ListenV2TurnInfo": ".types",
+ "ListenV2TurnInfoEvent": ".types",
+ "ListenV2TurnInfoParams": ".requests",
+ "ListenV2TurnInfoWordsItem": ".types",
+ "ListenV2TurnInfoWordsItemParams": ".requests",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV2CloseStream",
+ "ListenV2CloseStreamParams",
+ "ListenV2CloseStreamType",
+ "ListenV2Connected",
+ "ListenV2ConnectedParams",
+ "ListenV2FatalError",
+ "ListenV2FatalErrorParams",
+ "ListenV2TurnInfo",
+ "ListenV2TurnInfoEvent",
+ "ListenV2TurnInfoParams",
+ "ListenV2TurnInfoWordsItem",
+ "ListenV2TurnInfoWordsItemParams",
+]
diff --git a/src/deepgram/listen/v2/requests/__init__.py b/src/deepgram/listen/v2/requests/__init__.py
new file mode 100644
index 00000000..96ce5ece
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/__init__.py
@@ -0,0 +1,50 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v2close_stream import ListenV2CloseStreamParams
+ from .listen_v2connected import ListenV2ConnectedParams
+ from .listen_v2fatal_error import ListenV2FatalErrorParams
+ from .listen_v2turn_info import ListenV2TurnInfoParams
+ from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItemParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV2CloseStreamParams": ".listen_v2close_stream",
+ "ListenV2ConnectedParams": ".listen_v2connected",
+ "ListenV2FatalErrorParams": ".listen_v2fatal_error",
+ "ListenV2TurnInfoParams": ".listen_v2turn_info",
+ "ListenV2TurnInfoWordsItemParams": ".listen_v2turn_info_words_item",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV2CloseStreamParams",
+ "ListenV2ConnectedParams",
+ "ListenV2FatalErrorParams",
+ "ListenV2TurnInfoParams",
+ "ListenV2TurnInfoWordsItemParams",
+]
diff --git a/src/deepgram/listen/v2/requests/listen_v2close_stream.py b/src/deepgram/listen/v2/requests/listen_v2close_stream.py
new file mode 100644
index 00000000..70e4f760
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2close_stream.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v2close_stream_type import ListenV2CloseStreamType
+
+
+class ListenV2CloseStreamParams(typing_extensions.TypedDict):
+ type: ListenV2CloseStreamType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2connected.py b/src/deepgram/listen/v2/requests/listen_v2connected.py
new file mode 100644
index 00000000..c931eec2
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2connected.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV2ConnectedParams(typing_extensions.TypedDict):
+ type: typing.Literal["Connected"]
+ """
+ Message type identifier
+ """
+
+ request_id: str
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `TurnInfo` messages.
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2fatal_error.py b/src/deepgram/listen/v2/requests/listen_v2fatal_error.py
new file mode 100644
index 00000000..05cb3041
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2fatal_error.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV2FatalErrorParams(typing_extensions.TypedDict):
+ type: typing.Literal["Error"]
+ """
+ Message type identifier
+ """
+
+ sequence_id: float
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `Connected` messages.
+ """
+
+ code: str
+ """
+ A string code describing the error, e.g. `INTERNAL_SERVER_ERROR`
+ """
+
+ description: str
+ """
+ Prose description of the error
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2turn_info.py b/src/deepgram/listen/v2/requests/listen_v2turn_info.py
new file mode 100644
index 00000000..d1a15fec
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2turn_info.py
@@ -0,0 +1,65 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.listen_v2turn_info_event import ListenV2TurnInfoEvent
+from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItemParams
+
+
+class ListenV2TurnInfoParams(typing_extensions.TypedDict):
+ """
+ Describes the current turn and latest state of the turn
+ """
+
+ type: typing.Literal["TurnInfo"]
+ request_id: str
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float
+ """
+ Starts at `0` and increments for each message the server sends to the client. This includes messages of other types, like `Connected` messages.
+ """
+
+ event: ListenV2TurnInfoEvent
+ """
+ The type of event being reported.
+
+ - **Update** - Additional audio has been transcribed, but the turn state hasn't changed
+ - **StartOfTurn** - The user has begun speaking for the first time in the turn
+ - **EagerEndOfTurn** - The system has moderate confidence that the user has finished speaking for the turn. This is an opportunity to begin preparing an agent reply
+ - **TurnResumed** - The system detected that speech had ended and therefore sent an **EagerEndOfTurn** event, but speech is actually continuing for this turn
+ - **EndOfTurn** - The user has finished speaking for the turn
+ """
+
+ turn_index: float
+ """
+ The index of the current turn
+ """
+
+ audio_window_start: float
+ """
+ Start time in seconds of the audio range that was transcribed
+ """
+
+ audio_window_end: float
+ """
+ End time in seconds of the audio range that was transcribed
+ """
+
+ transcript: str
+ """
+ Text that was said over the course of the current turn
+ """
+
+ words: typing.Sequence[ListenV2TurnInfoWordsItemParams]
+ """
+ The words in the `transcript`
+ """
+
+ end_of_turn_confidence: float
+ """
+ Confidence that no more speech is coming in this turn
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2turn_info_words_item.py b/src/deepgram/listen/v2/requests/listen_v2turn_info_words_item.py
new file mode 100644
index 00000000..397157f5
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2turn_info_words_item.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV2TurnInfoWordsItemParams(typing_extensions.TypedDict):
+ word: str
+ """
+ The individual punctuated, properly-cased word from the transcript
+ """
+
+ confidence: float
+ """
+ Confidence that this word was transcribed correctly
+ """
diff --git a/src/deepgram/listen/v2/socket_client.py b/src/deepgram/listen/v2/socket_client.py
index ded23989..4bc7247c 100644
--- a/src/deepgram/listen/v2/socket_client.py
+++ b/src/deepgram/listen/v2/socket_client.py
@@ -1,5 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
-# Enhanced with binary message support, comprehensive socket types, and send methods.
import json
import typing
@@ -9,27 +8,17 @@
import websockets.sync.connection as websockets_sync_connection
from ...core.events import EventEmitterMixin, EventType
from ...core.pydantic_utilities import parse_obj_as
+from .types.listen_v2close_stream import ListenV2CloseStream
+from .types.listen_v2connected import ListenV2Connected
+from .types.listen_v2fatal_error import ListenV2FatalError
+from .types.listen_v2turn_info import ListenV2TurnInfo
try:
from websockets.legacy.client import WebSocketClientProtocol # type: ignore
except ImportError:
from websockets import WebSocketClientProtocol # type: ignore
-# Socket message types
-from ...extensions.types.sockets import (
- ListenV2ConnectedEvent,
- ListenV2ControlMessage,
- ListenV2FatalErrorEvent,
- ListenV2MediaMessage,
- ListenV2TurnInfoEvent,
-)
-
-# Response union type (Listen V2 only receives JSON events)
-V2SocketClientResponse = typing.Union[
- ListenV2ConnectedEvent,
- ListenV2TurnInfoEvent,
- ListenV2FatalErrorEvent,
-]
+V2SocketClientResponse = typing.Union[ListenV2Connected, ListenV2TurnInfo, ListenV2FatalError]
class AsyncV2SocketClient(EventEmitterMixin):
@@ -37,37 +26,13 @@ def __init__(self, *, websocket: WebSocketClientProtocol):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V2SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
async def __aiter__(self):
async for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V2SocketClientResponse, json.loads(message)) # type: ignore
async def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages.
Emits events in the following order:
- EventType.OPEN when connection is established
@@ -78,48 +43,49 @@ async def start_listening(self):
await self._emit_async(EventType.OPEN, None)
try:
async for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V2SocketClientResponse, json_data) # type: ignore
await self._emit_async(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- await self._emit_async(EventType.ERROR, exc)
+ await self._emit_async(EventType.ERROR, exc)
finally:
await self._emit_async(EventType.CLOSE, None)
+ async def send_listen_v_2_media(self, message: str) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a str.
+ """
+ await self._send_model(message)
+
+ async def send_listen_v_2_close_stream(self, message: ListenV2CloseStream) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV2CloseStream.
+ """
+ await self._send_model(message)
+
async def recv(self) -> V2SocketClientResponse:
"""
Receive a message from the websocket connection.
"""
data = await self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ json_data = json.loads(data)
+ return parse_obj_as(V2SocketClientResponse, json_data) # type: ignore
async def _send(self, data: typing.Any) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
"""
- if isinstance(data, (bytes, bytearray)):
- await self._websocket.send(data)
- elif isinstance(data, dict):
- await self._websocket.send(json.dumps(data))
- else:
- await self._websocket.send(data)
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ await self._websocket.send(data)
async def _send_model(self, data: typing.Any) -> None:
"""
Send a Pydantic model to the websocket connection.
"""
- await self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- async def send_control(self, message: ListenV2ControlMessage) -> None:
- """Send a control message."""
- await self._send_model(message)
-
- async def send_media(self, message: ListenV2MediaMessage) -> None:
- """Send binary audio data for transcription."""
- await self._send(message)
+ await self._send(data.dict())
class V2SocketClient(EventEmitterMixin):
@@ -127,37 +93,13 @@ def __init__(self, *, websocket: websockets_sync_connection.Connection):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V2SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
def __iter__(self):
for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V2SocketClientResponse, json.loads(message)) # type: ignore
def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages.
Emits events in the following order:
- EventType.OPEN when connection is established
@@ -168,45 +110,46 @@ def start_listening(self):
self._emit(EventType.OPEN, None)
try:
for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V2SocketClientResponse, json_data) # type: ignore
self._emit(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- self._emit(EventType.ERROR, exc)
+ self._emit(EventType.ERROR, exc)
finally:
self._emit(EventType.CLOSE, None)
+ def send_listen_v_2_media(self, message: str) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a str.
+ """
+ self._send_model(message)
+
+ def send_listen_v_2_close_stream(self, message: ListenV2CloseStream) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a ListenV2CloseStream.
+ """
+ self._send_model(message)
+
def recv(self) -> V2SocketClientResponse:
"""
Receive a message from the websocket connection.
"""
data = self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ json_data = json.loads(data)
+ return parse_obj_as(V2SocketClientResponse, json_data) # type: ignore
def _send(self, data: typing.Any) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
"""
- if isinstance(data, (bytes, bytearray)):
- self._websocket.send(data)
- elif isinstance(data, dict):
- self._websocket.send(json.dumps(data))
- else:
- self._websocket.send(data)
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ self._websocket.send(data)
def _send_model(self, data: typing.Any) -> None:
"""
Send a Pydantic model to the websocket connection.
"""
- self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- def send_control(self, message: ListenV2ControlMessage) -> None:
- """Send a control message."""
- self._send_model(message)
-
- def send_media(self, message: ListenV2MediaMessage) -> None:
- """Send binary audio data for transcription."""
- self._send(message)
+ self._send(data.dict())
diff --git a/src/deepgram/listen/v2/types/__init__.py b/src/deepgram/listen/v2/types/__init__.py
new file mode 100644
index 00000000..229417bf
--- /dev/null
+++ b/src/deepgram/listen/v2/types/__init__.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v2close_stream import ListenV2CloseStream
+ from .listen_v2close_stream_type import ListenV2CloseStreamType
+ from .listen_v2connected import ListenV2Connected
+ from .listen_v2fatal_error import ListenV2FatalError
+ from .listen_v2turn_info import ListenV2TurnInfo
+ from .listen_v2turn_info_event import ListenV2TurnInfoEvent
+ from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItem
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV2CloseStream": ".listen_v2close_stream",
+ "ListenV2CloseStreamType": ".listen_v2close_stream_type",
+ "ListenV2Connected": ".listen_v2connected",
+ "ListenV2FatalError": ".listen_v2fatal_error",
+ "ListenV2TurnInfo": ".listen_v2turn_info",
+ "ListenV2TurnInfoEvent": ".listen_v2turn_info_event",
+ "ListenV2TurnInfoWordsItem": ".listen_v2turn_info_words_item",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV2CloseStream",
+ "ListenV2CloseStreamType",
+ "ListenV2Connected",
+ "ListenV2FatalError",
+ "ListenV2TurnInfo",
+ "ListenV2TurnInfoEvent",
+ "ListenV2TurnInfoWordsItem",
+]
diff --git a/src/deepgram/listen/v2/types/listen_v2close_stream.py b/src/deepgram/listen/v2/types/listen_v2close_stream.py
new file mode 100644
index 00000000..00376ced
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2close_stream.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v2close_stream_type import ListenV2CloseStreamType
+
+
+class ListenV2CloseStream(UniversalBaseModel):
+ type: ListenV2CloseStreamType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2close_stream_type.py b/src/deepgram/listen/v2/types/listen_v2close_stream_type.py
new file mode 100644
index 00000000..2ac3484e
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2close_stream_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV2CloseStreamType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v2/types/listen_v2connected.py b/src/deepgram/listen/v2/types/listen_v2connected.py
new file mode 100644
index 00000000..29108f24
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2connected.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV2Connected(UniversalBaseModel):
+ type: typing.Literal["Connected"] = pydantic.Field(default="Connected")
+ """
+ Message type identifier
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `TurnInfo` messages.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2fatal_error.py b/src/deepgram/listen/v2/types/listen_v2fatal_error.py
new file mode 100644
index 00000000..1eccfabc
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2fatal_error.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV2FatalError(UniversalBaseModel):
+ type: typing.Literal["Error"] = pydantic.Field(default="Error")
+ """
+ Message type identifier
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `Connected` messages.
+ """
+
+ code: str = pydantic.Field()
+ """
+ A string code describing the error, e.g. `INTERNAL_SERVER_ERROR`
+ """
+
+ description: str = pydantic.Field()
+ """
+ Prose description of the error
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info.py b/src/deepgram/listen/v2/types/listen_v2turn_info.py
new file mode 100644
index 00000000..80006b6b
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2turn_info.py
@@ -0,0 +1,75 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v2turn_info_event import ListenV2TurnInfoEvent
+from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItem
+
+
+class ListenV2TurnInfo(UniversalBaseModel):
+ """
+ Describes the current turn and latest state of the turn
+ """
+
+ type: typing.Literal["TurnInfo"] = "TurnInfo"
+ request_id: str = pydantic.Field()
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ Starts at `0` and increments for each message the server sends to the client. This includes messages of other types, like `Connected` messages.
+ """
+
+ event: ListenV2TurnInfoEvent = pydantic.Field()
+ """
+ The type of event being reported.
+
+ - **Update** - Additional audio has been transcribed, but the turn state hasn't changed
+ - **StartOfTurn** - The user has begun speaking for the first time in the turn
+ - **EagerEndOfTurn** - The system has moderate confidence that the user has finished speaking for the turn. This is an opportunity to begin preparing an agent reply
+ - **TurnResumed** - The system detected that speech had ended and therefore sent an **EagerEndOfTurn** event, but speech is actually continuing for this turn
+ - **EndOfTurn** - The user has finished speaking for the turn
+ """
+
+ turn_index: float = pydantic.Field()
+ """
+ The index of the current turn
+ """
+
+ audio_window_start: float = pydantic.Field()
+ """
+ Start time in seconds of the audio range that was transcribed
+ """
+
+ audio_window_end: float = pydantic.Field()
+ """
+ End time in seconds of the audio range that was transcribed
+ """
+
+ transcript: str = pydantic.Field()
+ """
+ Text that was said over the course of the current turn
+ """
+
+ words: typing.List[ListenV2TurnInfoWordsItem] = pydantic.Field()
+ """
+ The words in the `transcript`
+ """
+
+ end_of_turn_confidence: float = pydantic.Field()
+ """
+ Confidence that no more speech is coming in this turn
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info_event.py b/src/deepgram/listen/v2/types/listen_v2turn_info_event.py
new file mode 100644
index 00000000..d2a0510f
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2turn_info_event.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV2TurnInfoEvent = typing.Union[
+ typing.Literal["Update", "StartOfTurn", "EagerEndOfTurn", "TurnResumed", "EndOfTurn"], typing.Any
+]
diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info_words_item.py b/src/deepgram/listen/v2/types/listen_v2turn_info_words_item.py
new file mode 100644
index 00000000..58ae2f98
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2turn_info_words_item.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV2TurnInfoWordsItem(UniversalBaseModel):
+ word: str = pydantic.Field()
+ """
+ The individual punctuated, properly-cased word from the transcript
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ Confidence that this word was transcribed correctly
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/manage/v1/models/client.py b/src/deepgram/manage/v1/models/client.py
index bc16c9e1..9ae001ca 100644
--- a/src/deepgram/manage/v1/models/client.py
+++ b/src/deepgram/manage/v1/models/client.py
@@ -50,7 +50,9 @@ def list(
client = DeepgramClient(
api_key="YOUR_API_KEY",
)
- client.manage.v1.models.list()
+ client.manage.v1.models.list(
+ include_outdated=True,
+ )
"""
_response = self._raw_client.list(include_outdated=include_outdated, request_options=request_options)
return _response.data
@@ -133,7 +135,9 @@ async def list(
async def main() -> None:
- await client.manage.v1.models.list()
+ await client.manage.v1.models.list(
+ include_outdated=True,
+ )
asyncio.run(main())
diff --git a/src/deepgram/manage/v1/projects/billing/breakdown/client.py b/src/deepgram/manage/v1/projects/billing/breakdown/client.py
index 5ccef016..34bfbcf0 100644
--- a/src/deepgram/manage/v1/projects/billing/breakdown/client.py
+++ b/src/deepgram/manage/v1/projects/billing/breakdown/client.py
@@ -86,7 +86,10 @@ def list(
)
client.manage.v1.projects.billing.breakdown.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
tag="tag1",
line_item="streaming::nova-3",
)
@@ -186,7 +189,10 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.billing.breakdown.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
tag="tag1",
line_item="streaming::nova-3",
)
diff --git a/src/deepgram/manage/v1/projects/billing/fields/client.py b/src/deepgram/manage/v1/projects/billing/fields/client.py
index 00682a0d..749103cc 100644
--- a/src/deepgram/manage/v1/projects/billing/fields/client.py
+++ b/src/deepgram/manage/v1/projects/billing/fields/client.py
@@ -62,6 +62,8 @@ def list(
)
client.manage.v1.projects.billing.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
"""
_response = self._raw_client.list(project_id, start=start, end=end, request_options=request_options)
@@ -127,6 +129,8 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.billing.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
diff --git a/src/deepgram/manage/v1/projects/billing/purchases/client.py b/src/deepgram/manage/v1/projects/billing/purchases/client.py
index 7bed75c3..7afe4f92 100644
--- a/src/deepgram/manage/v1/projects/billing/purchases/client.py
+++ b/src/deepgram/manage/v1/projects/billing/purchases/client.py
@@ -58,6 +58,7 @@ def list(
)
client.manage.v1.projects.billing.purchases.list(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
)
"""
_response = self._raw_client.list(project_id, limit=limit, request_options=request_options)
@@ -119,6 +120,7 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.billing.purchases.list(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
)
diff --git a/src/deepgram/manage/v1/projects/client.py b/src/deepgram/manage/v1/projects/client.py
index 439f0220..33b0fc8c 100644
--- a/src/deepgram/manage/v1/projects/client.py
+++ b/src/deepgram/manage/v1/projects/client.py
@@ -111,6 +111,8 @@ def get(
)
client.manage.v1.projects.get(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
+ page=1.1,
)
"""
_response = self._raw_client.get(project_id, limit=limit, page=page, request_options=request_options)
@@ -371,6 +373,8 @@ async def get(
async def main() -> None:
await client.manage.v1.projects.get(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
+ page=1.1,
)
diff --git a/src/deepgram/manage/v1/projects/keys/client.py b/src/deepgram/manage/v1/projects/keys/client.py
index 88885f1e..698bf841 100644
--- a/src/deepgram/manage/v1/projects/keys/client.py
+++ b/src/deepgram/manage/v1/projects/keys/client.py
@@ -66,6 +66,7 @@ def list(
)
client.manage.v1.projects.keys.list(
project_id="123456-7890-1234-5678-901234",
+ status="active",
)
"""
_response = self._raw_client.list(project_id, status=status, request_options=request_options)
@@ -241,6 +242,7 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.keys.list(
project_id="123456-7890-1234-5678-901234",
+ status="active",
)
diff --git a/src/deepgram/manage/v1/projects/models/client.py b/src/deepgram/manage/v1/projects/models/client.py
index 6ff8474f..9f78ba4d 100644
--- a/src/deepgram/manage/v1/projects/models/client.py
+++ b/src/deepgram/manage/v1/projects/models/client.py
@@ -59,6 +59,7 @@ def list(
)
client.manage.v1.projects.models.list(
project_id="123456-7890-1234-5678-901234",
+ include_outdated=True,
)
"""
_response = self._raw_client.list(
@@ -159,6 +160,7 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.models.list(
project_id="123456-7890-1234-5678-901234",
+ include_outdated=True,
)
diff --git a/src/deepgram/manage/v1/projects/requests/client.py b/src/deepgram/manage/v1/projects/requests/client.py
index a8e0246d..6d1435aa 100644
--- a/src/deepgram/manage/v1/projects/requests/client.py
+++ b/src/deepgram/manage/v1/projects/requests/client.py
@@ -93,6 +93,8 @@ def list(
Examples
--------
+ import datetime
+
from deepgram import DeepgramClient
client = DeepgramClient(
@@ -100,8 +102,20 @@ def list(
)
client.manage.v1.projects.requests.list(
project_id="123456-7890-1234-5678-901234",
+ start=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ end=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ limit=1.1,
+ page=1.1,
accessor="12345678-1234-1234-1234-123456789012",
request_id="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
+ endpoint="listen",
+ method="sync",
+ status="succeeded",
)
"""
_response = self._raw_client.list(
@@ -238,6 +252,7 @@ async def list(
Examples
--------
import asyncio
+ import datetime
from deepgram import AsyncDeepgramClient
@@ -249,8 +264,20 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.requests.list(
project_id="123456-7890-1234-5678-901234",
+ start=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ end=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ limit=1.1,
+ page=1.1,
accessor="12345678-1234-1234-1234-123456789012",
request_id="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
+ endpoint="listen",
+ method="sync",
+ status="succeeded",
)
diff --git a/src/deepgram/manage/v1/projects/usage/breakdown/client.py b/src/deepgram/manage/v1/projects/usage/breakdown/client.py
index 57532bbd..1f0822cb 100644
--- a/src/deepgram/manage/v1/projects/usage/breakdown/client.py
+++ b/src/deepgram/manage/v1/projects/usage/breakdown/client.py
@@ -238,10 +238,51 @@ def get(
)
client.manage.v1.projects.usage.breakdown.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
+ grouping="accessor",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
"""
_response = self._raw_client.get(
@@ -527,10 +568,51 @@ async def get(
async def main() -> None:
await client.manage.v1.projects.usage.breakdown.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
+ grouping="accessor",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
diff --git a/src/deepgram/manage/v1/projects/usage/client.py b/src/deepgram/manage/v1/projects/usage/client.py
index d2c1c7e0..6ffad7f2 100644
--- a/src/deepgram/manage/v1/projects/usage/client.py
+++ b/src/deepgram/manage/v1/projects/usage/client.py
@@ -242,10 +242,50 @@ def get(
)
client.manage.v1.projects.usage.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
"""
_response = self._raw_client.get(
@@ -545,10 +585,50 @@ async def get(
async def main() -> None:
await client.manage.v1.projects.usage.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
diff --git a/src/deepgram/manage/v1/projects/usage/fields/client.py b/src/deepgram/manage/v1/projects/usage/fields/client.py
index d810f50f..c1e144ab 100644
--- a/src/deepgram/manage/v1/projects/usage/fields/client.py
+++ b/src/deepgram/manage/v1/projects/usage/fields/client.py
@@ -62,6 +62,8 @@ def list(
)
client.manage.v1.projects.usage.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
"""
_response = self._raw_client.list(project_id, start=start, end=end, request_options=request_options)
@@ -127,6 +129,8 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.usage.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
diff --git a/src/deepgram/read/v1/text/client.py b/src/deepgram/read/v1/text/client.py
index b906e76a..d04b8a7c 100644
--- a/src/deepgram/read/v1/text/client.py
+++ b/src/deepgram/read/v1/text/client.py
@@ -108,6 +108,18 @@ def analyze(
api_key="YOUR_API_KEY",
)
client.read.v1.text.analyze(
+ callback="callback",
+ callback_method="POST",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ language="language",
request={"url": "url"},
)
"""
@@ -227,6 +239,18 @@ async def analyze(
async def main() -> None:
await client.read.v1.text.analyze(
+ callback="callback",
+ callback_method="POST",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ language="language",
request={"url": "url"},
)
diff --git a/src/deepgram/speak/__init__.py b/src/deepgram/speak/__init__.py
index 148ad154..73eda24c 100644
--- a/src/deepgram/speak/__init__.py
+++ b/src/deepgram/speak/__init__.py
@@ -7,7 +7,53 @@
if typing.TYPE_CHECKING:
from . import v1
-_dynamic_imports: typing.Dict[str, str] = {"v1": ".v1"}
+ from .v1 import (
+ SpeakV1Clear,
+ SpeakV1ClearParams,
+ SpeakV1ClearType,
+ SpeakV1Cleared,
+ SpeakV1ClearedParams,
+ SpeakV1ClearedType,
+ SpeakV1Close,
+ SpeakV1CloseParams,
+ SpeakV1CloseType,
+ SpeakV1Flush,
+ SpeakV1FlushParams,
+ SpeakV1FlushType,
+ SpeakV1Flushed,
+ SpeakV1FlushedParams,
+ SpeakV1FlushedType,
+ SpeakV1Metadata,
+ SpeakV1MetadataParams,
+ SpeakV1Text,
+ SpeakV1TextParams,
+ SpeakV1Warning,
+ SpeakV1WarningParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "SpeakV1Clear": ".v1",
+ "SpeakV1ClearParams": ".v1",
+ "SpeakV1ClearType": ".v1",
+ "SpeakV1Cleared": ".v1",
+ "SpeakV1ClearedParams": ".v1",
+ "SpeakV1ClearedType": ".v1",
+ "SpeakV1Close": ".v1",
+ "SpeakV1CloseParams": ".v1",
+ "SpeakV1CloseType": ".v1",
+ "SpeakV1Flush": ".v1",
+ "SpeakV1FlushParams": ".v1",
+ "SpeakV1FlushType": ".v1",
+ "SpeakV1Flushed": ".v1",
+ "SpeakV1FlushedParams": ".v1",
+ "SpeakV1FlushedType": ".v1",
+ "SpeakV1Metadata": ".v1",
+ "SpeakV1MetadataParams": ".v1",
+ "SpeakV1Text": ".v1",
+ "SpeakV1TextParams": ".v1",
+ "SpeakV1Warning": ".v1",
+ "SpeakV1WarningParams": ".v1",
+ "v1": ".v1",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +77,27 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["v1"]
+__all__ = [
+ "SpeakV1Clear",
+ "SpeakV1ClearParams",
+ "SpeakV1ClearType",
+ "SpeakV1Cleared",
+ "SpeakV1ClearedParams",
+ "SpeakV1ClearedType",
+ "SpeakV1Close",
+ "SpeakV1CloseParams",
+ "SpeakV1CloseType",
+ "SpeakV1Flush",
+ "SpeakV1FlushParams",
+ "SpeakV1FlushType",
+ "SpeakV1Flushed",
+ "SpeakV1FlushedParams",
+ "SpeakV1FlushedType",
+ "SpeakV1Metadata",
+ "SpeakV1MetadataParams",
+ "SpeakV1Text",
+ "SpeakV1TextParams",
+ "SpeakV1Warning",
+ "SpeakV1WarningParams",
+ "v1",
+]
diff --git a/src/deepgram/speak/v1/__init__.py b/src/deepgram/speak/v1/__init__.py
index 40fff37f..c874ef09 100644
--- a/src/deepgram/speak/v1/__init__.py
+++ b/src/deepgram/speak/v1/__init__.py
@@ -6,6 +6,21 @@
from importlib import import_module
if typing.TYPE_CHECKING:
+ from .types import (
+ SpeakV1Clear,
+ SpeakV1ClearType,
+ SpeakV1Cleared,
+ SpeakV1ClearedType,
+ SpeakV1Close,
+ SpeakV1CloseType,
+ SpeakV1Flush,
+ SpeakV1FlushType,
+ SpeakV1Flushed,
+ SpeakV1FlushedType,
+ SpeakV1Metadata,
+ SpeakV1Text,
+ SpeakV1Warning,
+ )
from . import audio
from .audio import (
AudioGenerateRequestCallbackMethod,
@@ -13,11 +28,42 @@
AudioGenerateRequestEncoding,
AudioGenerateRequestModel,
)
+ from .requests import (
+ SpeakV1ClearParams,
+ SpeakV1ClearedParams,
+ SpeakV1CloseParams,
+ SpeakV1FlushParams,
+ SpeakV1FlushedParams,
+ SpeakV1MetadataParams,
+ SpeakV1TextParams,
+ SpeakV1WarningParams,
+ )
_dynamic_imports: typing.Dict[str, str] = {
"AudioGenerateRequestCallbackMethod": ".audio",
"AudioGenerateRequestContainer": ".audio",
"AudioGenerateRequestEncoding": ".audio",
"AudioGenerateRequestModel": ".audio",
+ "SpeakV1Clear": ".types",
+ "SpeakV1ClearParams": ".requests",
+ "SpeakV1ClearType": ".types",
+ "SpeakV1Cleared": ".types",
+ "SpeakV1ClearedParams": ".requests",
+ "SpeakV1ClearedType": ".types",
+ "SpeakV1Close": ".types",
+ "SpeakV1CloseParams": ".requests",
+ "SpeakV1CloseType": ".types",
+ "SpeakV1Flush": ".types",
+ "SpeakV1FlushParams": ".requests",
+ "SpeakV1FlushType": ".types",
+ "SpeakV1Flushed": ".types",
+ "SpeakV1FlushedParams": ".requests",
+ "SpeakV1FlushedType": ".types",
+ "SpeakV1Metadata": ".types",
+ "SpeakV1MetadataParams": ".requests",
+ "SpeakV1Text": ".types",
+ "SpeakV1TextParams": ".requests",
+ "SpeakV1Warning": ".types",
+ "SpeakV1WarningParams": ".requests",
"audio": ".audio",
}
@@ -48,5 +94,26 @@ def __dir__():
"AudioGenerateRequestContainer",
"AudioGenerateRequestEncoding",
"AudioGenerateRequestModel",
+ "SpeakV1Clear",
+ "SpeakV1ClearParams",
+ "SpeakV1ClearType",
+ "SpeakV1Cleared",
+ "SpeakV1ClearedParams",
+ "SpeakV1ClearedType",
+ "SpeakV1Close",
+ "SpeakV1CloseParams",
+ "SpeakV1CloseType",
+ "SpeakV1Flush",
+ "SpeakV1FlushParams",
+ "SpeakV1FlushType",
+ "SpeakV1Flushed",
+ "SpeakV1FlushedParams",
+ "SpeakV1FlushedType",
+ "SpeakV1Metadata",
+ "SpeakV1MetadataParams",
+ "SpeakV1Text",
+ "SpeakV1TextParams",
+ "SpeakV1Warning",
+ "SpeakV1WarningParams",
"audio",
]
diff --git a/src/deepgram/speak/v1/requests/__init__.py b/src/deepgram/speak/v1/requests/__init__.py
new file mode 100644
index 00000000..4e8e1826
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/__init__.py
@@ -0,0 +1,59 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .speak_v1clear import SpeakV1ClearParams
+ from .speak_v1cleared import SpeakV1ClearedParams
+ from .speak_v1close import SpeakV1CloseParams
+ from .speak_v1flush import SpeakV1FlushParams
+ from .speak_v1flushed import SpeakV1FlushedParams
+ from .speak_v1metadata import SpeakV1MetadataParams
+ from .speak_v1text import SpeakV1TextParams
+ from .speak_v1warning import SpeakV1WarningParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "SpeakV1ClearParams": ".speak_v1clear",
+ "SpeakV1ClearedParams": ".speak_v1cleared",
+ "SpeakV1CloseParams": ".speak_v1close",
+ "SpeakV1FlushParams": ".speak_v1flush",
+ "SpeakV1FlushedParams": ".speak_v1flushed",
+ "SpeakV1MetadataParams": ".speak_v1metadata",
+ "SpeakV1TextParams": ".speak_v1text",
+ "SpeakV1WarningParams": ".speak_v1warning",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "SpeakV1ClearParams",
+ "SpeakV1ClearedParams",
+ "SpeakV1CloseParams",
+ "SpeakV1FlushParams",
+ "SpeakV1FlushedParams",
+ "SpeakV1MetadataParams",
+ "SpeakV1TextParams",
+ "SpeakV1WarningParams",
+]
diff --git a/src/deepgram/speak/v1/requests/speak_v1clear.py b/src/deepgram/speak/v1/requests/speak_v1clear.py
new file mode 100644
index 00000000..6ffc2f3e
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1clear.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1clear_type import SpeakV1ClearType
+
+
+class SpeakV1ClearParams(typing_extensions.TypedDict):
+ type: SpeakV1ClearType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1cleared.py b/src/deepgram/speak/v1/requests/speak_v1cleared.py
new file mode 100644
index 00000000..e1f1784b
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1cleared.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1cleared_type import SpeakV1ClearedType
+
+
+class SpeakV1ClearedParams(typing_extensions.TypedDict):
+ type: SpeakV1ClearedType
+ """
+ Message type identifier
+ """
+
+ sequence_id: float
+ """
+ The sequence ID of the response
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1close.py b/src/deepgram/speak/v1/requests/speak_v1close.py
new file mode 100644
index 00000000..7a3219c3
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1close.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1close_type import SpeakV1CloseType
+
+
+class SpeakV1CloseParams(typing_extensions.TypedDict):
+ type: SpeakV1CloseType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1flush.py b/src/deepgram/speak/v1/requests/speak_v1flush.py
new file mode 100644
index 00000000..8bafc736
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1flush.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1flush_type import SpeakV1FlushType
+
+
+class SpeakV1FlushParams(typing_extensions.TypedDict):
+ type: SpeakV1FlushType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1flushed.py b/src/deepgram/speak/v1/requests/speak_v1flushed.py
new file mode 100644
index 00000000..674cb52d
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1flushed.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1flushed_type import SpeakV1FlushedType
+
+
+class SpeakV1FlushedParams(typing_extensions.TypedDict):
+ type: SpeakV1FlushedType
+ """
+ Message type identifier
+ """
+
+ sequence_id: float
+ """
+ The sequence ID of the response
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1metadata.py b/src/deepgram/speak/v1/requests/speak_v1metadata.py
new file mode 100644
index 00000000..89fb6809
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1metadata.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class SpeakV1MetadataParams(typing_extensions.TypedDict):
+ type: typing.Literal["Metadata"]
+ """
+ Message type identifier
+ """
+
+ request_id: str
+ """
+ Unique identifier for the request
+ """
+
+ model_name: str
+ """
+ Name of the model being used
+ """
+
+ model_version: str
+ """
+ Version of the model being used
+ """
+
+ model_uuid: str
+ """
+ Unique identifier for the model
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1text.py b/src/deepgram/speak/v1/requests/speak_v1text.py
new file mode 100644
index 00000000..78873194
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1text.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class SpeakV1TextParams(typing_extensions.TypedDict):
+ type: typing.Literal["Speak"]
+ """
+ Message type identifier
+ """
+
+ text: str
+ """
+ The input text to be converted to speech
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1warning.py b/src/deepgram/speak/v1/requests/speak_v1warning.py
new file mode 100644
index 00000000..ca6c78f8
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1warning.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class SpeakV1WarningParams(typing_extensions.TypedDict):
+ type: typing.Literal["Warning"]
+ """
+ Message type identifier
+ """
+
+ description: str
+ """
+ A description of what went wrong
+ """
+
+ code: str
+ """
+ Error code identifying the type of error
+ """
diff --git a/src/deepgram/speak/v1/socket_client.py b/src/deepgram/speak/v1/socket_client.py
index 6d4b77aa..e370f3ca 100644
--- a/src/deepgram/speak/v1/socket_client.py
+++ b/src/deepgram/speak/v1/socket_client.py
@@ -1,5 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
-# Enhanced with binary message support, comprehensive socket types, and send methods.
import json
import typing
@@ -9,30 +8,21 @@
import websockets.sync.connection as websockets_sync_connection
from ...core.events import EventEmitterMixin, EventType
from ...core.pydantic_utilities import parse_obj_as
+from .types.speak_v1clear import SpeakV1Clear
+from .types.speak_v1cleared import SpeakV1Cleared
+from .types.speak_v1close import SpeakV1Close
+from .types.speak_v1flush import SpeakV1Flush
+from .types.speak_v1flushed import SpeakV1Flushed
+from .types.speak_v1metadata import SpeakV1Metadata
+from .types.speak_v1text import SpeakV1Text
+from .types.speak_v1warning import SpeakV1Warning
try:
from websockets.legacy.client import WebSocketClientProtocol # type: ignore
except ImportError:
from websockets import WebSocketClientProtocol # type: ignore
-# Socket message types
-from ...extensions.types.sockets import (
- SpeakV1AudioChunkEvent,
- SpeakV1ControlEvent,
- SpeakV1ControlMessage,
- SpeakV1MetadataEvent,
- SpeakV1TextMessage,
- SpeakV1WarningEvent,
-)
-
-# Response union type with binary support
-V1SocketClientResponse = typing.Union[
- SpeakV1AudioChunkEvent, # Binary audio data
- SpeakV1MetadataEvent, # JSON metadata
- SpeakV1ControlEvent, # JSON control responses (Flushed, Cleared)
- SpeakV1WarningEvent, # JSON warnings
- bytes, # Raw binary audio chunks
-]
+V1SocketClientResponse = typing.Union[str, SpeakV1Metadata, SpeakV1Flushed, SpeakV1Cleared, SpeakV1Warning]
class AsyncV1SocketClient(EventEmitterMixin):
@@ -40,90 +30,80 @@ def __init__(self, *, websocket: WebSocketClientProtocol):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is for audio chunks)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
async def __aiter__(self):
async for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V1SocketClientResponse, json.loads(message)) # type: ignore
async def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages.
Emits events in the following order:
- EventType.OPEN when connection is established
- - EventType.MESSAGE for each message received (binary or JSON)
+ - EventType.MESSAGE for each message received
- EventType.ERROR if an error occurs
- EventType.CLOSE when connection is closed
"""
await self._emit_async(EventType.OPEN, None)
try:
async for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
await self._emit_async(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- await self._emit_async(EventType.ERROR, exc)
+ await self._emit_async(EventType.ERROR, exc)
finally:
await self._emit_async(EventType.CLOSE, None)
+ async def send_speak_v_1_text(self, message: SpeakV1Text) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Text.
+ """
+ await self._send_model(message)
+
+ async def send_speak_v_1_flush(self, message: SpeakV1Flush) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Flush.
+ """
+ await self._send_model(message)
+
+ async def send_speak_v_1_clear(self, message: SpeakV1Clear) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Clear.
+ """
+ await self._send_model(message)
+
+ async def send_speak_v_1_close(self, message: SpeakV1Close) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Close.
+ """
+ await self._send_model(message)
+
async def recv(self) -> V1SocketClientResponse:
"""
Receive a message from the websocket connection.
- Handles both binary and JSON messages.
"""
data = await self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ json_data = json.loads(data)
+ return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
async def _send(self, data: typing.Any) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
"""
- if isinstance(data, (bytes, bytearray)):
- await self._websocket.send(data)
- elif isinstance(data, dict):
- await self._websocket.send(json.dumps(data))
- else:
- await self._websocket.send(data)
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ await self._websocket.send(data)
async def _send_model(self, data: typing.Any) -> None:
"""
Send a Pydantic model to the websocket connection.
"""
- await self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- async def send_text(self, message: SpeakV1TextMessage) -> None:
- """Send a text message to generate speech."""
- await self._send_model(message)
-
- async def send_control(self, message: SpeakV1ControlMessage) -> None:
- """Send a control message (flush, clear, etc.)."""
- await self._send_model(message)
+ await self._send(data.dict())
class V1SocketClient(EventEmitterMixin):
@@ -131,87 +111,77 @@ def __init__(self, *, websocket: websockets_sync_connection.Connection):
super().__init__()
self._websocket = websocket
- def _is_binary_message(self, message: typing.Any) -> bool:
- """Determine if a message is binary data."""
- return isinstance(message, (bytes, bytearray))
-
- def _handle_binary_message(self, message: bytes) -> typing.Any:
- """Handle a binary message (returns as-is for audio chunks)."""
- return message
-
- def _handle_json_message(self, message: str) -> typing.Any:
- """Handle a JSON message by parsing it."""
- json_data = json.loads(message)
- return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
-
- def _process_message(self, raw_message: typing.Any) -> typing.Tuple[typing.Any, bool]:
- """Process a raw message, detecting if it's binary or JSON."""
- if self._is_binary_message(raw_message):
- processed = self._handle_binary_message(raw_message)
- return processed, True
- else:
- processed = self._handle_json_message(raw_message)
- return processed, False
-
def __iter__(self):
for message in self._websocket:
- processed_message, _ = self._process_message(message)
- yield processed_message
+ yield parse_obj_as(V1SocketClientResponse, json.loads(message)) # type: ignore
def start_listening(self):
"""
Start listening for messages on the websocket connection.
- Handles both binary and JSON messages.
Emits events in the following order:
- EventType.OPEN when connection is established
- - EventType.MESSAGE for each message received (binary or JSON)
+ - EventType.MESSAGE for each message received
- EventType.ERROR if an error occurs
- EventType.CLOSE when connection is closed
"""
self._emit(EventType.OPEN, None)
try:
for raw_message in self._websocket:
- parsed, is_binary = self._process_message(raw_message)
+ json_data = json.loads(raw_message)
+ parsed = parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
self._emit(EventType.MESSAGE, parsed)
except (websockets.WebSocketException, JSONDecodeError) as exc:
- # Do not emit an error for a normal/clean close
- if not isinstance(exc, websockets.exceptions.ConnectionClosedOK):
- self._emit(EventType.ERROR, exc)
+ self._emit(EventType.ERROR, exc)
finally:
self._emit(EventType.CLOSE, None)
+ def send_speak_v_1_text(self, message: SpeakV1Text) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Text.
+ """
+ self._send_model(message)
+
+ def send_speak_v_1_flush(self, message: SpeakV1Flush) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Flush.
+ """
+ self._send_model(message)
+
+ def send_speak_v_1_clear(self, message: SpeakV1Clear) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Clear.
+ """
+ self._send_model(message)
+
+ def send_speak_v_1_close(self, message: SpeakV1Close) -> None:
+ """
+ Send a message to the websocket connection.
+ The message will be sent as a SpeakV1Close.
+ """
+ self._send_model(message)
+
def recv(self) -> V1SocketClientResponse:
"""
Receive a message from the websocket connection.
- Handles both binary and JSON messages.
"""
data = self._websocket.recv()
- processed_message, _ = self._process_message(data)
- return processed_message
+ json_data = json.loads(data)
+ return parse_obj_as(V1SocketClientResponse, json_data) # type: ignore
def _send(self, data: typing.Any) -> None:
"""
- Send data as binary or JSON depending on type.
+ Send a message to the websocket connection.
"""
- if isinstance(data, (bytes, bytearray)):
- self._websocket.send(data)
- elif isinstance(data, dict):
- self._websocket.send(json.dumps(data))
- else:
- self._websocket.send(data)
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ self._websocket.send(data)
def _send_model(self, data: typing.Any) -> None:
"""
Send a Pydantic model to the websocket connection.
"""
- self._send(data.dict(exclude_unset=True, exclude_none=True))
-
- # Enhanced send methods for specific message types
- def send_text(self, message: SpeakV1TextMessage) -> None:
- """Send a text message to generate speech."""
- self._send_model(message)
-
- def send_control(self, message: SpeakV1ControlMessage) -> None:
- """Send a control message (flush, clear, etc.)."""
- self._send_model(message)
+ self._send(data.dict())
diff --git a/src/deepgram/speak/v1/types/__init__.py b/src/deepgram/speak/v1/types/__init__.py
new file mode 100644
index 00000000..72a25d1b
--- /dev/null
+++ b/src/deepgram/speak/v1/types/__init__.py
@@ -0,0 +1,74 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .speak_v1clear import SpeakV1Clear
+ from .speak_v1clear_type import SpeakV1ClearType
+ from .speak_v1cleared import SpeakV1Cleared
+ from .speak_v1cleared_type import SpeakV1ClearedType
+ from .speak_v1close import SpeakV1Close
+ from .speak_v1close_type import SpeakV1CloseType
+ from .speak_v1flush import SpeakV1Flush
+ from .speak_v1flush_type import SpeakV1FlushType
+ from .speak_v1flushed import SpeakV1Flushed
+ from .speak_v1flushed_type import SpeakV1FlushedType
+ from .speak_v1metadata import SpeakV1Metadata
+ from .speak_v1text import SpeakV1Text
+ from .speak_v1warning import SpeakV1Warning
+_dynamic_imports: typing.Dict[str, str] = {
+ "SpeakV1Clear": ".speak_v1clear",
+ "SpeakV1ClearType": ".speak_v1clear_type",
+ "SpeakV1Cleared": ".speak_v1cleared",
+ "SpeakV1ClearedType": ".speak_v1cleared_type",
+ "SpeakV1Close": ".speak_v1close",
+ "SpeakV1CloseType": ".speak_v1close_type",
+ "SpeakV1Flush": ".speak_v1flush",
+ "SpeakV1FlushType": ".speak_v1flush_type",
+ "SpeakV1Flushed": ".speak_v1flushed",
+ "SpeakV1FlushedType": ".speak_v1flushed_type",
+ "SpeakV1Metadata": ".speak_v1metadata",
+ "SpeakV1Text": ".speak_v1text",
+ "SpeakV1Warning": ".speak_v1warning",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "SpeakV1Clear",
+ "SpeakV1ClearType",
+ "SpeakV1Cleared",
+ "SpeakV1ClearedType",
+ "SpeakV1Close",
+ "SpeakV1CloseType",
+ "SpeakV1Flush",
+ "SpeakV1FlushType",
+ "SpeakV1Flushed",
+ "SpeakV1FlushedType",
+ "SpeakV1Metadata",
+ "SpeakV1Text",
+ "SpeakV1Warning",
+]
diff --git a/src/deepgram/speak/v1/types/speak_v1clear.py b/src/deepgram/speak/v1/types/speak_v1clear.py
new file mode 100644
index 00000000..b528050a
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1clear.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1clear_type import SpeakV1ClearType
+
+
+class SpeakV1Clear(UniversalBaseModel):
+ type: SpeakV1ClearType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1clear_type.py b/src/deepgram/speak/v1/types/speak_v1clear_type.py
new file mode 100644
index 00000000..93317162
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1clear_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1ClearType = typing.Union[typing.Literal["Flush", "Clear", "Close"], typing.Any]
diff --git a/src/deepgram/extensions/types/sockets/agent_v1_welcome_message.py b/src/deepgram/speak/v1/types/speak_v1cleared.py
similarity index 55%
rename from src/deepgram/extensions/types/sockets/agent_v1_welcome_message.py
rename to src/deepgram/speak/v1/types/speak_v1cleared.py
index 19950a5a..9e88c530 100644
--- a/src/deepgram/extensions/types/sockets/agent_v1_welcome_message.py
+++ b/src/deepgram/speak/v1/types/speak_v1cleared.py
@@ -1,25 +1,27 @@
-# Agent V1 Welcome Message - protected from auto-generation
+# This file was auto-generated by Fern from our API Definition.
import typing
import pydantic
from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1cleared_type import SpeakV1ClearedType
-class AgentV1WelcomeMessage(UniversalBaseModel):
+class SpeakV1Cleared(UniversalBaseModel):
+ type: SpeakV1ClearedType = pydantic.Field()
"""
- Confirms that the WebSocket connection has been successfully opened
+ Message type identifier
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ The sequence ID of the response
"""
-
- type: typing.Literal["Welcome"]
- """Message type identifier"""
-
- request_id: str
- """Unique identifier for the request"""
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
+
class Config:
frozen = True
smart_union = True
diff --git a/src/deepgram/speak/v1/types/speak_v1cleared_type.py b/src/deepgram/speak/v1/types/speak_v1cleared_type.py
new file mode 100644
index 00000000..2e2b0158
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1cleared_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1ClearedType = typing.Union[typing.Literal["Flushed", "Cleared"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1close.py b/src/deepgram/speak/v1/types/speak_v1close.py
new file mode 100644
index 00000000..f801dc92
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1close.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1close_type import SpeakV1CloseType
+
+
+class SpeakV1Close(UniversalBaseModel):
+ type: SpeakV1CloseType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1close_type.py b/src/deepgram/speak/v1/types/speak_v1close_type.py
new file mode 100644
index 00000000..c3381c96
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1close_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1CloseType = typing.Union[typing.Literal["Flush", "Clear", "Close"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1flush.py b/src/deepgram/speak/v1/types/speak_v1flush.py
new file mode 100644
index 00000000..bfa3f72c
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flush.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1flush_type import SpeakV1FlushType
+
+
+class SpeakV1Flush(UniversalBaseModel):
+ type: SpeakV1FlushType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1flush_type.py b/src/deepgram/speak/v1/types/speak_v1flush_type.py
new file mode 100644
index 00000000..eaf4237f
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flush_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1FlushType = typing.Union[typing.Literal["Flush", "Clear", "Close"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1flushed.py b/src/deepgram/speak/v1/types/speak_v1flushed.py
new file mode 100644
index 00000000..6a5fd2c2
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flushed.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1flushed_type import SpeakV1FlushedType
+
+
+class SpeakV1Flushed(UniversalBaseModel):
+ type: SpeakV1FlushedType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ The sequence ID of the response
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1flushed_type.py b/src/deepgram/speak/v1/types/speak_v1flushed_type.py
new file mode 100644
index 00000000..4651ac44
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flushed_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1FlushedType = typing.Union[typing.Literal["Flushed", "Cleared"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1metadata.py b/src/deepgram/speak/v1/types/speak_v1metadata.py
new file mode 100644
index 00000000..4502f0c6
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1metadata.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class SpeakV1Metadata(UniversalBaseModel):
+ type: typing.Literal["Metadata"] = pydantic.Field(default="Metadata")
+ """
+ Message type identifier
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ Unique identifier for the request
+ """
+
+ model_name: str = pydantic.Field()
+ """
+ Name of the model being used
+ """
+
+ model_version: str = pydantic.Field()
+ """
+ Version of the model being used
+ """
+
+ model_uuid: str = pydantic.Field()
+ """
+ Unique identifier for the model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1text.py b/src/deepgram/speak/v1/types/speak_v1text.py
new file mode 100644
index 00000000..94ec70c8
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1text.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class SpeakV1Text(UniversalBaseModel):
+ type: typing.Literal["Speak"] = pydantic.Field(default="Speak")
+ """
+ Message type identifier
+ """
+
+ text: str = pydantic.Field()
+ """
+ The input text to be converted to speech
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1warning.py b/src/deepgram/speak/v1/types/speak_v1warning.py
new file mode 100644
index 00000000..95815596
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1warning.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class SpeakV1Warning(UniversalBaseModel):
+ type: typing.Literal["Warning"] = pydantic.Field(default="Warning")
+ """
+ Message type identifier
+ """
+
+ description: str = pydantic.Field()
+ """
+ A description of what went wrong
+ """
+
+ code: str = pydantic.Field()
+ """
+ Error code identifying the type of error
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/tests/integrations/__init__.py b/tests/integrations/__init__.py
deleted file mode 100644
index b1b8a3cb..00000000
--- a/tests/integrations/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Integration tests for Deepgram Python SDK clients."""
diff --git a/tests/integrations/conftest.py b/tests/integrations/conftest.py
deleted file mode 100644
index 60c51220..00000000
--- a/tests/integrations/conftest.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Shared configuration and fixtures for integration tests."""
-
-import os
-import pytest
-from unittest.mock import Mock, AsyncMock
-import asyncio
-from typing import Optional, Dict, Any
-
-# Mock environment variables for testing
-TEST_API_KEY = "test_api_key_12345"
-TEST_ACCESS_TOKEN = "test_access_token_67890"
-
-@pytest.fixture(scope="session")
-def event_loop():
- """Create an instance of the default event loop for the test session."""
- loop = asyncio.new_event_loop()
- yield loop
- loop.close()
-
-@pytest.fixture
-def mock_api_key():
- """Provide a mock API key for testing."""
- return TEST_API_KEY
-
-@pytest.fixture
-def mock_access_token():
- """Provide a mock access token for testing."""
- return TEST_ACCESS_TOKEN
-
-@pytest.fixture
-def mock_env_vars(monkeypatch):
- """Mock environment variables."""
- monkeypatch.setenv("DEEPGRAM_API_KEY", TEST_API_KEY)
- monkeypatch.setenv("DEEPGRAM_ENV", "test")
-
-@pytest.fixture
-def mock_websocket():
- """Mock websocket connection for testing."""
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock()
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- return mock_ws
-
-@pytest.fixture
-def mock_async_websocket():
- """Mock async websocket connection for testing."""
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
- mock_ws.recv = AsyncMock()
- mock_ws.__aenter__ = AsyncMock(return_value=mock_ws)
- mock_ws.__aexit__ = AsyncMock(return_value=None)
- return mock_ws
-
-@pytest.fixture
-def sample_audio_data():
- """Sample audio data for testing."""
- return b'\x00\x01\x02\x03\x04\x05' * 100 # 600 bytes of sample audio
-
-@pytest.fixture
-def sample_text():
- """Sample text for testing."""
- return "Hello, this is a test message for speech synthesis."
-
-@pytest.fixture
-def mock_http_response():
- """Mock HTTP response."""
- mock_response = Mock()
- mock_response.status_code = 200
- mock_response.json.return_value = {"success": True, "message": "Test response"}
- mock_response.headers = {"Content-Type": "application/json"}
- return mock_response
diff --git a/tests/integrations/test_advanced_features.py b/tests/integrations/test_advanced_features.py
deleted file mode 100644
index 4384a6b2..00000000
--- a/tests/integrations/test_advanced_features.py
+++ /dev/null
@@ -1,601 +0,0 @@
-"""
-Integration tests for advanced/specialized features.
-
-This module tests advanced features including:
-- Agent Settings APIs (think models, configuration)
-- Advanced Management APIs (project distribution credentials, scopes)
-- Self-hosted client features
-- Advanced telemetry and instrumentation features
-"""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch
-import httpx
-import json
-from typing import Dict, Any
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.environment import DeepgramClientEnvironment
-
-# Import clients for advanced features
-from deepgram.agent.v1.settings.client import SettingsClient, AsyncSettingsClient
-from deepgram.agent.v1.settings.think.client import ThinkClient, AsyncThinkClient
-from deepgram.agent.v1.settings.think.models.client import ModelsClient as ThinkModelsClient, AsyncModelsClient as AsyncThinkModelsClient
-from deepgram.self_hosted.client import SelfHostedClient, AsyncSelfHostedClient
-
-# Import response types (if they exist)
-try:
- from deepgram.types.agent_think_models_v1response import AgentThinkModelsV1Response
-except ImportError:
- # AgentThinkModelsV1Response might not exist, create a placeholder
- AgentThinkModelsV1Response = Dict[str, Any]
-
-
-class TestAgentSettingsAPI:
- """Test Agent Settings API advanced features."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_agent_settings_client_initialization(self, sync_client_wrapper):
- """Test Agent Settings client initialization."""
- client = SettingsClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._think is None # Lazy loaded
-
- def test_async_agent_settings_client_initialization(self, async_client_wrapper):
- """Test Async Agent Settings client initialization."""
- client = AsyncSettingsClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._think is None # Lazy loaded
-
- def test_agent_settings_think_property_lazy_loading(self, sync_client_wrapper):
- """Test Agent Settings think property lazy loading."""
- client = SettingsClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._think is None
-
- # Access triggers lazy loading
- think_client = client.think
- assert client._think is not None
- assert isinstance(think_client, ThinkClient)
-
- # Subsequent access returns same instance
- assert client.think is think_client
-
- def test_async_agent_settings_think_property_lazy_loading(self, async_client_wrapper):
- """Test Async Agent Settings think property lazy loading."""
- client = AsyncSettingsClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._think is None
-
- # Access triggers lazy loading
- think_client = client.think
- assert client._think is not None
- assert isinstance(think_client, AsyncThinkClient)
-
- # Subsequent access returns same instance
- assert client.think is think_client
-
- def test_agent_think_client_initialization(self, sync_client_wrapper):
- """Test Agent Think client initialization."""
- client = ThinkClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._models is None # Lazy loaded
-
- def test_async_agent_think_client_initialization(self, async_client_wrapper):
- """Test Async Agent Think client initialization."""
- client = AsyncThinkClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._models is None # Lazy loaded
-
- def test_agent_think_models_property_lazy_loading(self, sync_client_wrapper):
- """Test Agent Think models property lazy loading."""
- client = ThinkClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._models is None
-
- # Access triggers lazy loading
- models_client = client.models
- assert client._models is not None
- assert isinstance(models_client, ThinkModelsClient)
-
- # Subsequent access returns same instance
- assert client.models is models_client
-
- def test_async_agent_think_models_property_lazy_loading(self, async_client_wrapper):
- """Test Async Agent Think models property lazy loading."""
- client = AsyncThinkClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._models is None
-
- # Access triggers lazy loading
- models_client = client.models
- assert client._models is not None
- assert isinstance(models_client, AsyncThinkModelsClient)
-
- # Subsequent access returns same instance
- assert client.models is models_client
-
- def test_agent_think_models_list(self, sync_client_wrapper):
- """Test Agent Think models list functionality."""
- # Mock the raw client's list method directly
- client = ThinkModelsClient(client_wrapper=sync_client_wrapper)
-
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = {"models": [{"id": "test-model", "name": "Test Model"}]}
-
- with patch.object(client._raw_client, 'list', return_value=mock_response) as mock_list:
- result = client.list()
-
- assert result is not None
- mock_list.assert_called_once_with(request_options=None)
-
- @pytest.mark.asyncio
- async def test_async_agent_think_models_list(self, async_client_wrapper):
- """Test Async Agent Think models list functionality."""
- # Mock the raw client's list method directly
- client = AsyncThinkModelsClient(client_wrapper=async_client_wrapper)
-
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = {"models": [{"id": "test-model", "name": "Test Model"}]}
-
- with patch.object(client._raw_client, 'list', return_value=mock_response) as mock_list:
- result = await client.list()
-
- assert result is not None
- mock_list.assert_called_once_with(request_options=None)
-
- def test_agent_think_models_list_with_request_options(self, sync_client_wrapper):
- """Test Agent Think models list with request options."""
- # Mock the raw client's list method directly
- client = ThinkModelsClient(client_wrapper=sync_client_wrapper)
-
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = {"models": []}
-
- request_options = RequestOptions(
- additional_headers={"Custom-Header": "test-value"},
- timeout_in_seconds=30.0
- )
-
- with patch.object(client._raw_client, 'list', return_value=mock_response) as mock_list:
- result = client.list(request_options=request_options)
-
- assert result is not None
- mock_list.assert_called_once_with(request_options=request_options)
-
- @patch('httpx.Client.request')
- def test_agent_think_models_list_api_error(self, mock_request, sync_client_wrapper):
- """Test Agent Think models list API error handling."""
- # Mock error response
- mock_response = Mock()
- mock_response.status_code = 401
- mock_response.json.return_value = {"error": "Unauthorized"}
- mock_response.headers = {"content-type": "application/json"}
- mock_request.return_value = mock_response
-
- client = ThinkModelsClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises((ApiError, Exception)):
- client.list()
-
- @patch('httpx.AsyncClient.request')
- @pytest.mark.asyncio
- async def test_async_agent_think_models_list_api_error(self, mock_request, async_client_wrapper):
- """Test Async Agent Think models list API error handling."""
- # Mock error response
- mock_response = Mock()
- mock_response.status_code = 500
- mock_response.json.return_value = {"error": "Internal Server Error"}
- mock_response.headers = {"content-type": "application/json"}
- mock_request.return_value = mock_response
-
- client = AsyncThinkModelsClient(client_wrapper=async_client_wrapper)
-
- with pytest.raises((ApiError, Exception)):
- await client.list()
-
-
-class TestSelfHostedClient:
- """Test Self-hosted client advanced features."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_self_hosted_client_initialization(self, sync_client_wrapper):
- """Test Self-hosted client initialization."""
- client = SelfHostedClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_async_self_hosted_client_initialization(self, async_client_wrapper):
- """Test Async Self-hosted client initialization."""
- client = AsyncSelfHostedClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_self_hosted_client_v1_property_lazy_loading(self, sync_client_wrapper):
- """Test Self-hosted client v1 property lazy loading."""
- client = SelfHostedClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_async_self_hosted_client_v1_property_lazy_loading(self, async_client_wrapper):
- """Test Async Self-hosted client v1 property lazy loading."""
- client = AsyncSelfHostedClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_self_hosted_client_integration_with_main_client(self, mock_api_key):
- """Test Self-hosted client integration with main DeepgramClient."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access self-hosted client through main client
- self_hosted = client.self_hosted
- assert self_hosted is not None
- assert isinstance(self_hosted, SelfHostedClient)
-
- def test_async_self_hosted_client_integration_with_main_client(self, mock_api_key):
- """Test Async Self-hosted client integration with main DeepgramClient."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access self-hosted client through main client
- self_hosted = client.self_hosted
- assert self_hosted is not None
- assert isinstance(self_hosted, AsyncSelfHostedClient)
-
-
-class TestAdvancedManagementFeatures:
- """Test advanced management API features."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_project_member_scopes_client_access(self, mock_api_key):
- """Test access to project member scopes client."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access member scopes through projects client
- projects_client = client.manage.v1.projects
-
- # Try to access members and then scopes
- try:
- members_client = projects_client.members
- if members_client is not None and hasattr(members_client, 'scopes'):
- scopes_client = members_client.scopes
- assert scopes_client is not None
- except AttributeError:
- # It's acceptable if this advanced feature isn't fully implemented
- pass
-
- def test_async_project_member_scopes_client_access(self, mock_api_key):
- """Test async access to project member scopes client."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access member scopes through projects client
- projects_client = client.manage.v1.projects
-
- # Try to access members and then scopes
- try:
- members_client = projects_client.members
- if members_client is not None and hasattr(members_client, 'scopes'):
- scopes_client = members_client.scopes
- assert scopes_client is not None
- except AttributeError:
- # It's acceptable if this advanced feature isn't fully implemented
- pass
-
- def test_project_advanced_operations_availability(self, mock_api_key):
- """Test availability of advanced project operations."""
- client = DeepgramClient(api_key=mock_api_key)
- projects_client = client.manage.v1.projects
-
- # Check that advanced operations are available
- advanced_operations = [
- 'keys', 'members', 'requests', 'usage', 'billing', 'models'
- ]
-
- for operation in advanced_operations:
- assert hasattr(projects_client, operation), f"Missing {operation} operation"
-
- # Try to access the property to trigger lazy loading
- try:
- sub_client = getattr(projects_client, operation)
- assert sub_client is not None
- except Exception:
- # Some advanced features might not be fully implemented
- pass
-
- # Check that billing has purchases and balances sub-clients
- billing_client = projects_client.billing
- assert hasattr(billing_client, 'purchases'), "Missing purchases under billing"
- assert hasattr(billing_client, 'balances'), "Missing balances under billing"
-
- def test_async_project_advanced_operations_availability(self, mock_api_key):
- """Test availability of advanced project operations for async client."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
- projects_client = client.manage.v1.projects
-
- # Check that advanced operations are available
- advanced_operations = [
- 'keys', 'members', 'requests', 'usage', 'billing', 'models'
- ]
-
- for operation in advanced_operations:
- assert hasattr(projects_client, operation), f"Missing {operation} operation"
-
- # Try to access the property to trigger lazy loading
- try:
- sub_client = getattr(projects_client, operation)
- assert sub_client is not None
- except Exception:
- # Some advanced features might not be fully implemented
- pass
-
- # Check that billing has purchases and balances sub-clients
- billing_client = projects_client.billing
- assert hasattr(billing_client, 'purchases'), "Missing purchases under billing"
- assert hasattr(billing_client, 'balances'), "Missing balances under billing"
-
-
-class TestAdvancedIntegrationScenarios:
- """Test advanced integration scenarios combining multiple features."""
-
- def test_agent_settings_with_management_workflow(self, mock_api_key):
- """Test workflow combining agent settings and management APIs."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access both agent settings and management clients
- agent_settings = client.agent.v1.settings
- management = client.manage.v1
-
- assert agent_settings is not None
- assert management is not None
-
- # Verify they use the same underlying client infrastructure
- assert agent_settings._client_wrapper is not None
- assert management._client_wrapper is not None
-
- @pytest.mark.asyncio
- async def test_async_agent_settings_with_management_workflow(self, mock_api_key):
- """Test async workflow combining agent settings and management APIs."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access both agent settings and management clients
- agent_settings = client.agent.v1.settings
- management = client.manage.v1
-
- assert agent_settings is not None
- assert management is not None
-
- # Verify they use the same underlying client infrastructure
- assert agent_settings._client_wrapper is not None
- assert management._client_wrapper is not None
-
- def test_self_hosted_with_advanced_features_workflow(self, mock_api_key):
- """Test workflow combining self-hosted client with other advanced features."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access multiple advanced clients
- self_hosted = client.self_hosted
- management = client.manage
- agent = client.agent
-
- assert self_hosted is not None
- assert management is not None
- assert agent is not None
-
- # Verify all clients share the same base infrastructure
- base_clients = [self_hosted, management, agent]
- for base_client in base_clients:
- assert hasattr(base_client, '_client_wrapper')
-
- @pytest.mark.asyncio
- async def test_async_self_hosted_with_advanced_features_workflow(self, mock_api_key):
- """Test async workflow combining self-hosted client with other advanced features."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access multiple advanced clients
- self_hosted = client.self_hosted
- management = client.manage
- agent = client.agent
-
- assert self_hosted is not None
- assert management is not None
- assert agent is not None
-
- # Verify all clients share the same base infrastructure
- base_clients = [self_hosted, management, agent]
- for base_client in base_clients:
- assert hasattr(base_client, '_client_wrapper')
-
- def test_advanced_error_handling_across_features(self, mock_api_key):
- """Test error handling consistency across advanced features."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Test that all advanced clients handle initialization properly
- advanced_clients = [
- client.agent.v1.settings,
- client.manage.v1,
- client.self_hosted,
- ]
-
- for adv_client in advanced_clients:
- assert adv_client is not None
- assert hasattr(adv_client, '_client_wrapper')
-
- # Test that raw response access works
- if hasattr(adv_client, 'with_raw_response'):
- raw_client = adv_client.with_raw_response
- assert raw_client is not None
-
- @pytest.mark.asyncio
- async def test_async_advanced_error_handling_across_features(self, mock_api_key):
- """Test async error handling consistency across advanced features."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Test that all advanced clients handle initialization properly
- advanced_clients = [
- client.agent.v1.settings,
- client.manage.v1,
- client.self_hosted,
- ]
-
- for adv_client in advanced_clients:
- assert adv_client is not None
- assert hasattr(adv_client, '_client_wrapper')
-
- # Test that raw response access works
- if hasattr(adv_client, 'with_raw_response'):
- raw_client = adv_client.with_raw_response
- assert raw_client is not None
-
-
-class TestAdvancedFeatureErrorHandling:
- """Test error handling for advanced features."""
-
- @patch('httpx.Client.request')
- def test_agent_settings_network_error_handling(self, mock_request, mock_api_key):
- """Test network error handling in agent settings."""
- # Mock network error
- mock_request.side_effect = httpx.ConnectError("Connection failed")
-
- client = DeepgramClient(api_key=mock_api_key)
- think_models_client = client.agent.v1.settings.think.models
-
- with pytest.raises((httpx.ConnectError, ApiError, Exception)):
- think_models_client.list()
-
- @patch('httpx.AsyncClient.request')
- @pytest.mark.asyncio
- async def test_async_agent_settings_network_error_handling(self, mock_request, mock_api_key):
- """Test async network error handling in agent settings."""
- # Mock network error
- mock_request.side_effect = httpx.ConnectError("Connection failed")
-
- client = AsyncDeepgramClient(api_key=mock_api_key)
- think_models_client = client.agent.v1.settings.think.models
-
- with pytest.raises((httpx.ConnectError, ApiError, Exception)):
- await think_models_client.list()
-
- def test_client_wrapper_integration_across_advanced_features(self, mock_api_key):
- """Test client wrapper integration across advanced features."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Get client wrappers from different advanced features
- agent_wrapper = client.agent.v1.settings._client_wrapper
- manage_wrapper = client.manage.v1._client_wrapper
-
- # They should have the same configuration
- assert agent_wrapper.api_key == manage_wrapper.api_key
- assert agent_wrapper.get_environment() == manage_wrapper.get_environment()
-
- @pytest.mark.asyncio
- async def test_async_client_wrapper_integration_across_advanced_features(self, mock_api_key):
- """Test async client wrapper integration across advanced features."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Get client wrappers from different advanced features
- agent_wrapper = client.agent.v1.settings._client_wrapper
- manage_wrapper = client.manage.v1._client_wrapper
-
- # They should have the same configuration
- assert agent_wrapper.api_key == manage_wrapper.api_key
- assert agent_wrapper.get_environment() == manage_wrapper.get_environment()
diff --git a/tests/integrations/test_agent_client.py b/tests/integrations/test_agent_client.py
deleted file mode 100644
index 46a7f165..00000000
--- a/tests/integrations/test_agent_client.py
+++ /dev/null
@@ -1,636 +0,0 @@
-"""Integration tests for Agent client implementations."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch, MagicMock
-from contextlib import contextmanager, asynccontextmanager
-import httpx
-import websockets.exceptions
-import json
-import asyncio
-from json.decoder import JSONDecodeError
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.core.events import EventType
-from deepgram.environment import DeepgramClientEnvironment
-
-# Import Agent clients
-from deepgram.agent.client import AgentClient, AsyncAgentClient
-from deepgram.agent.v1.client import V1Client as AgentV1Client, AsyncV1Client as AgentAsyncV1Client
-
-# Import Agent raw clients
-from deepgram.agent.v1.raw_client import RawV1Client as AgentRawV1Client, AsyncRawV1Client as AgentAsyncRawV1Client
-
-# Import Agent socket clients
-from deepgram.agent.v1.socket_client import V1SocketClient as AgentV1SocketClient, AsyncV1SocketClient as AgentAsyncV1SocketClient
-
-# Import socket message types
-from deepgram.extensions.types.sockets import (
- AgentV1SettingsMessage,
- AgentV1ControlMessage,
- AgentV1MediaMessage,
-)
-
-
-class TestAgentClient:
- """Test cases for Agent Client."""
-
- def test_agent_client_initialization(self, mock_api_key):
- """Test AgentClient initialization."""
- client = DeepgramClient(api_key=mock_api_key).agent
- assert client is not None
- assert hasattr(client, 'v1')
-
- def test_async_agent_client_initialization(self, mock_api_key):
- """Test AsyncAgentClient initialization."""
- client = AsyncDeepgramClient(api_key=mock_api_key).agent
- assert client is not None
- assert hasattr(client, 'v1')
-
- def test_agent_client_with_raw_response(self, mock_api_key):
- """Test AgentClient with_raw_response property."""
- client = DeepgramClient(api_key=mock_api_key).agent
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert hasattr(raw_client, '_client_wrapper')
-
- def test_async_agent_client_with_raw_response(self, mock_api_key):
- """Test AsyncAgentClient with_raw_response property."""
- client = AsyncDeepgramClient(api_key=mock_api_key).agent
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert hasattr(raw_client, '_client_wrapper')
-
-
-class TestAgentRawV1Client:
- """Test cases for Agent V1 Raw Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_sync_agent_raw_client_initialization(self, sync_client_wrapper):
- """Test synchronous agent raw client initialization."""
- client = AgentRawV1Client(client_wrapper=sync_client_wrapper)
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
-
- def test_async_agent_raw_client_initialization(self, async_client_wrapper):
- """Test asynchronous agent raw client initialization."""
- client = AgentAsyncRawV1Client(client_wrapper=async_client_wrapper)
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
-
- @patch('websockets.sync.client.connect')
- def test_sync_agent_connect_success(self, mock_websocket_connect, sync_client_wrapper, mock_websocket):
- """Test successful synchronous Agent WebSocket connection."""
- mock_websocket_connect.return_value.__enter__ = Mock(return_value=mock_websocket)
- mock_websocket_connect.return_value.__exit__ = Mock(return_value=None)
-
- client = AgentRawV1Client(client_wrapper=sync_client_wrapper)
-
- with client.connect() as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
- @patch('deepgram.agent.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_agent_connect_success(self, mock_websocket_connect, async_client_wrapper, mock_async_websocket):
- """Test successful asynchronous Agent WebSocket connection."""
- mock_websocket_connect.return_value.__aenter__ = AsyncMock(return_value=mock_async_websocket)
- mock_websocket_connect.return_value.__aexit__ = AsyncMock(return_value=None)
-
- client = AgentAsyncRawV1Client(client_wrapper=async_client_wrapper)
-
- async with client.connect() as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
- def test_agent_url_construction(self, sync_client_wrapper):
- """Test Agent WebSocket URL construction."""
- client = AgentRawV1Client(client_wrapper=sync_client_wrapper)
-
- # Mock the websocket connection to capture the URL
- with patch('websockets.sync.client.connect') as mock_connect:
- mock_connect.return_value.__enter__ = Mock(return_value=Mock())
- mock_connect.return_value.__exit__ = Mock(return_value=None)
-
- try:
- with client.connect() as connection:
- pass
- except:
- pass # We just want to check the URL construction
-
- # Verify the URL was constructed for Agent endpoint
- call_args = mock_connect.call_args
- if call_args and len(call_args[0]) > 0:
- url = call_args[0][0]
- assert "agent" in url.lower()
-
-
-class TestAgentV1SocketClient:
- """Test cases for Agent V1 Socket Client."""
-
- def test_agent_sync_socket_client_initialization(self):
- """Test Agent synchronous socket client initialization."""
- mock_ws = Mock()
- client = AgentV1SocketClient(websocket=mock_ws)
-
- assert client is not None
- assert client._websocket is mock_ws
-
- def test_agent_async_socket_client_initialization(self):
- """Test Agent asynchronous socket client initialization."""
- mock_ws = AsyncMock()
- client = AgentAsyncV1SocketClient(websocket=mock_ws)
-
- assert client is not None
- assert client._websocket is mock_ws
-
- def test_agent_sync_send_settings(self):
- """Test Agent synchronous settings message sending."""
- mock_ws = Mock()
- client = AgentV1SocketClient(websocket=mock_ws)
-
- # Mock settings message
- mock_settings_msg = Mock(spec=AgentV1SettingsMessage)
- mock_settings_msg.dict.return_value = {"type": "SettingsConfiguration"}
-
- client.send_settings(mock_settings_msg)
-
- mock_settings_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- def test_agent_sync_send_control(self):
- """Test Agent synchronous control message sending."""
- mock_ws = Mock()
- client = AgentV1SocketClient(websocket=mock_ws)
-
- # Mock control message
- mock_control_msg = Mock(spec=AgentV1ControlMessage)
- mock_control_msg.dict.return_value = {"type": "KeepAlive"}
-
- client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- def test_agent_sync_send_media(self, sample_audio_data):
- """Test Agent synchronous media message sending."""
- mock_ws = Mock()
- client = AgentV1SocketClient(websocket=mock_ws)
-
- client.send_media(sample_audio_data)
-
- mock_ws.send.assert_called_once_with(sample_audio_data)
-
- @pytest.mark.asyncio
- async def test_agent_async_send_settings(self):
- """Test Agent asynchronous settings message sending."""
- mock_ws = AsyncMock()
- client = AgentAsyncV1SocketClient(websocket=mock_ws)
-
- # Mock settings message
- mock_settings_msg = Mock(spec=AgentV1SettingsMessage)
- mock_settings_msg.dict.return_value = {"type": "SettingsConfiguration"}
-
- await client.send_settings(mock_settings_msg)
-
- mock_settings_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_agent_async_send_control(self):
- """Test Agent asynchronous control message sending."""
- mock_ws = AsyncMock()
- client = AgentAsyncV1SocketClient(websocket=mock_ws)
-
- # Mock control message
- mock_control_msg = Mock(spec=AgentV1ControlMessage)
- mock_control_msg.dict.return_value = {"type": "KeepAlive"}
-
- await client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_agent_async_send_media(self, sample_audio_data):
- """Test Agent asynchronous media message sending."""
- mock_ws = AsyncMock()
- client = AgentAsyncV1SocketClient(websocket=mock_ws)
-
- await client.send_media(sample_audio_data)
-
- mock_ws.send.assert_called_once_with(sample_audio_data)
-
-
-class TestAgentIntegrationScenarios:
- """Test Agent API integration scenarios."""
-
- @patch('websockets.sync.client.connect')
- def test_agent_conversation_workflow(self, mock_websocket_connect, mock_api_key, sample_audio_data):
- """Test complete Agent conversation workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock(side_effect=[
- '{"type": "Welcome", "request_id": "req-123"}',
- '{"type": "ConversationText", "role": "assistant", "content": "Hello!"}',
- b'\x00\x01\x02\x03' # Audio chunk
- ])
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Welcome", "request_id": "req-123"}',
- '{"type": "ConversationText", "role": "assistant", "content": "Hello!"}',
- b'\x00\x01\x02\x03' # Audio chunk
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Connect and interact with agent
- with client.agent.v1.with_raw_response.connect() as connection:
- # Send settings
- connection.send_settings(Mock())
-
- # Send control message
- connection.send_control(Mock())
-
- # Send audio data
- connection.send_media(sample_audio_data)
-
- # Receive agent response
- result = connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- @patch('websockets.sync.client.connect')
- def test_agent_function_call_workflow(self, mock_websocket_connect, mock_api_key):
- """Test Agent function call workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock(side_effect=[
- '{"type": "Welcome", "request_id": "func-req-123"}',
- '{"type": "FunctionCallRequest", "function_name": "get_weather", "arguments": {"location": "New York"}}'
- ])
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Welcome", "request_id": "func-req-123"}',
- '{"type": "FunctionCallRequest", "function_name": "get_weather", "arguments": {"location": "New York"}}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Connect and handle function calls
- with client.agent.v1.with_raw_response.connect() as connection:
- # Send settings with function definitions
- connection.send_settings(Mock())
-
- # Send user message that triggers function call
- connection.send_media(b'User asks about weather')
-
- # Receive function call request
- result = connection.recv()
- assert result is not None
-
- # Send function call response
- connection.send_control(Mock()) # Function response message
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- @patch('websockets.sync.client.connect')
- def test_agent_event_driven_workflow(self, mock_websocket_connect, mock_api_key):
- """Test Agent event-driven workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Welcome", "request_id": "event-agent-123"}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Mock event handlers
- on_open = Mock()
- on_message = Mock()
- on_close = Mock()
- on_error = Mock()
-
- # Connect with event handlers
- with client.agent.v1.with_raw_response.connect() as connection:
- # Set up event handlers
- connection.on(EventType.OPEN, on_open)
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, on_close)
- connection.on(EventType.ERROR, on_error)
-
- # Start listening (this will process the mock messages)
- connection.start_listening()
-
- # Verify event handlers were set up
- assert hasattr(connection, 'on')
-
- @patch('deepgram.agent.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_agent_conversation_workflow(self, mock_websocket_connect, mock_api_key, sample_audio_data):
- """Test async Agent conversation workflow."""
- # Mock async websocket connection
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
- mock_ws.recv = AsyncMock(side_effect=[
- '{"type": "Welcome", "request_id": "async-agent-123"}',
- '{"type": "ConversationText", "role": "assistant", "content": "Hello from async agent!"}'
- ])
-
- async def mock_aiter():
- yield '{"type": "Welcome", "request_id": "async-agent-123"}'
- yield '{"type": "ConversationText", "role": "assistant", "content": "Hello from async agent!"}'
-
- mock_ws.__aiter__ = Mock(return_value=mock_aiter())
- mock_ws.__aenter__ = AsyncMock(return_value=mock_ws)
- mock_ws.__aexit__ = AsyncMock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Connect and interact with agent
- async with client.agent.v1.with_raw_response.connect() as connection:
- # Send settings
- await connection.send_settings(Mock())
-
- # Send control message
- await connection.send_control(Mock())
-
- # Send audio data
- await connection.send_media(sample_audio_data)
-
- # Receive agent response
- result = await connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- @patch('deepgram.agent.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_agent_function_call_workflow(self, mock_websocket_connect, mock_api_key):
- """Test async Agent function call workflow."""
- # Mock async websocket connection
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
- mock_ws.recv = AsyncMock(side_effect=[
- '{"type": "Welcome", "request_id": "async-func-123"}',
- '{"type": "FunctionCallRequest", "function_name": "get_weather", "arguments": {"location": "San Francisco"}}'
- ])
-
- async def mock_aiter():
- yield '{"type": "Welcome", "request_id": "async-func-123"}'
- yield '{"type": "FunctionCallRequest", "function_name": "get_weather", "arguments": {"location": "San Francisco"}}'
-
- mock_ws.__aiter__ = Mock(return_value=mock_aiter())
- mock_ws.__aenter__ = AsyncMock(return_value=mock_ws)
- mock_ws.__aexit__ = AsyncMock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Connect and handle function calls
- async with client.agent.v1.with_raw_response.connect() as connection:
- # Send settings with function definitions
- await connection.send_settings(Mock())
-
- # Send user message that triggers function call
- await connection.send_media(b'User asks about weather in SF')
-
- # Receive function call request
- result = await connection.recv()
- assert result is not None
-
- # Send function call response
- await connection.send_control(Mock()) # Function response message
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- def test_complete_agent_workflow_sync(self, mock_api_key):
- """Test complete Agent workflow using sync client."""
- with patch('websockets.sync.client.connect') as mock_websocket_connect:
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Welcome", "request_id": "complete-sync-123"}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access nested agent functionality
- with client.agent.v1.with_raw_response.connect() as connection:
- # Send initial settings
- connection.send_settings(Mock())
-
- # Send user audio
- connection.send_media(b'Hello agent')
-
- # Process response
- for message in connection:
- if isinstance(message, dict) and message.get('type') == 'Welcome':
- break
-
- # Verify the connection was established
- mock_websocket_connect.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_agent_workflow_async(self, mock_api_key):
- """Test complete Agent workflow using async client."""
- with patch('deepgram.agent.v1.raw_client.websockets_client_connect') as mock_websocket_connect:
- # Mock async websocket connection
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
-
- async def mock_aiter():
- yield '{"type": "Welcome", "request_id": "complete-async-123"}'
-
- mock_ws.__aiter__ = Mock(return_value=mock_aiter())
- mock_ws.__aenter__ = AsyncMock(return_value=mock_ws)
- mock_ws.__aexit__ = AsyncMock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access nested agent functionality
- async with client.agent.v1.with_raw_response.connect() as connection:
- # Send initial settings
- await connection.send_settings(Mock())
-
- # Send user audio
- await connection.send_media(b'Hello async agent')
-
- # Process response
- async for message in connection:
- if isinstance(message, dict) and message.get('type') == 'Welcome':
- break
-
- # Verify the connection was established
- mock_websocket_connect.assert_called_once()
-
- def test_agent_client_property_isolation(self, mock_api_key):
- """Test that agent clients are properly isolated between instances."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- # Verify clients are different instances
- assert client1.agent is not client2.agent
-
- # Verify nested clients are also different
- agent1 = client1.agent.v1
- agent2 = client2.agent.v1
-
- assert agent1 is not agent2
-
- @pytest.mark.asyncio
- async def test_mixed_sync_async_agent_clients(self, mock_api_key):
- """Test mixing sync and async agent clients."""
- sync_client = DeepgramClient(api_key=mock_api_key)
- async_client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Verify clients are different types
- assert type(sync_client.agent) != type(async_client.agent)
-
- # Verify nested clients are also different types
- sync_agent = sync_client.agent.v1
- async_agent = async_client.agent.v1
-
- assert type(sync_agent) != type(async_agent)
- assert isinstance(sync_agent, AgentV1Client)
- assert isinstance(async_agent, AgentAsyncV1Client)
-
-
-class TestAgentErrorHandling:
- """Test Agent client error handling."""
-
- @patch('websockets.sync.client.connect')
- def test_websocket_connection_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test WebSocket connection error handling."""
- mock_websocket_connect.side_effect = websockets.exceptions.ConnectionClosedError(None, None)
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(websockets.exceptions.ConnectionClosedError):
- with client.agent.v1.with_raw_response.connect() as connection:
- pass
-
- @patch('websockets.sync.client.connect')
- def test_generic_websocket_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test generic WebSocket error handling."""
- mock_websocket_connect.side_effect = Exception("Generic Agent WebSocket error")
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(Exception) as exc_info:
- with client.agent.v1.with_raw_response.connect() as connection:
- pass
-
- assert "Generic Agent WebSocket error" in str(exc_info.value)
-
- @patch('deepgram.agent.v1.raw_client.websockets_sync_client.connect')
- def test_agent_invalid_credentials_error(self, mock_websocket_connect, mock_api_key):
- """Test Agent connection with invalid credentials."""
- mock_websocket_connect.side_effect = websockets.exceptions.InvalidStatusCode(
- status_code=401, headers={}
- )
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(ApiError) as exc_info:
- with client.agent.v1.with_raw_response.connect() as connection:
- pass
-
- assert exc_info.value.status_code == 401
- assert "invalid credentials" in exc_info.value.body.lower()
-
- @patch('deepgram.agent.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_websocket_connection_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test async WebSocket connection error handling."""
- mock_websocket_connect.side_effect = websockets.exceptions.ConnectionClosedError(None, None)
-
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(websockets.exceptions.ConnectionClosedError):
- async with client.agent.v1.with_raw_response.connect() as connection:
- pass
-
- def test_client_wrapper_integration(self, mock_api_key):
- """Test integration with client wrapper."""
- client = DeepgramClient(api_key=mock_api_key).agent
- assert client._client_wrapper is not None
- assert client._client_wrapper.api_key == mock_api_key
-
- def test_socket_client_error_scenarios(self, sample_audio_data):
- """Test Agent socket client error scenarios."""
- mock_ws = Mock()
- mock_ws.send = Mock(side_effect=Exception("Send error"))
-
- client = AgentV1SocketClient(websocket=mock_ws)
-
- # Test that send errors are properly propagated
- with pytest.raises(Exception) as exc_info:
- client.send_media(sample_audio_data)
-
- assert "Send error" in str(exc_info.value)
-
- @pytest.mark.asyncio
- async def test_async_socket_client_error_scenarios(self, sample_audio_data):
- """Test async Agent socket client error scenarios."""
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock(side_effect=Exception("Async send error"))
-
- client = AgentAsyncV1SocketClient(websocket=mock_ws)
-
- # Test that async send errors are properly propagated
- with pytest.raises(Exception) as exc_info:
- await client.send_media(sample_audio_data)
-
- assert "Async send error" in str(exc_info.value)
diff --git a/tests/integrations/test_auth_client.py b/tests/integrations/test_auth_client.py
deleted file mode 100644
index 22fbb99f..00000000
--- a/tests/integrations/test_auth_client.py
+++ /dev/null
@@ -1,597 +0,0 @@
-"""Integration tests for Auth client implementations."""
-
-import pytest
-import httpx
-from unittest.mock import Mock, AsyncMock, patch
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.environment import DeepgramClientEnvironment
-from deepgram.types.grant_v1response import GrantV1Response
-
-from deepgram.auth.client import AuthClient, AsyncAuthClient
-from deepgram.auth.v1.client import V1Client as AuthV1Client, AsyncV1Client as AuthAsyncV1Client
-from deepgram.auth.v1.tokens.client import TokensClient, AsyncTokensClient
-
-
-class TestAuthClient:
- """Test cases for Auth Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_auth_client_initialization(self, sync_client_wrapper):
- """Test AuthClient initialization."""
- client = AuthClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_async_auth_client_initialization(self, async_client_wrapper):
- """Test AsyncAuthClient initialization."""
- client = AsyncAuthClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_auth_client_v1_property_lazy_loading(self, sync_client_wrapper):
- """Test AuthClient v1 property lazy loading."""
- client = AuthClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, AuthV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_async_auth_client_v1_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncAuthClient v1 property lazy loading."""
- client = AsyncAuthClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, AuthAsyncV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_auth_client_raw_response_access(self, sync_client_wrapper):
- """Test AuthClient raw response access."""
- client = AuthClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_auth_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncAuthClient raw response access."""
- client = AsyncAuthClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_auth_client_integration_with_main_client(self, mock_api_key):
- """Test AuthClient integration with main DeepgramClient."""
- client = DeepgramClient(api_key=mock_api_key)
-
- auth_client = client.auth
- assert auth_client is not None
- assert isinstance(auth_client, AuthClient)
-
- def test_async_auth_client_integration_with_main_client(self, mock_api_key):
- """Test AsyncAuthClient integration with main AsyncDeepgramClient."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- auth_client = client.auth
- assert auth_client is not None
- assert isinstance(auth_client, AsyncAuthClient)
-
-
-class TestAuthV1Client:
- """Test cases for Auth V1 Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_auth_v1_client_initialization(self, sync_client_wrapper):
- """Test AuthV1Client initialization."""
- client = AuthV1Client(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._tokens is None # Lazy loaded
-
- def test_async_auth_v1_client_initialization(self, async_client_wrapper):
- """Test AsyncAuthV1Client initialization."""
- client = AuthAsyncV1Client(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._tokens is None # Lazy loaded
-
- def test_auth_v1_client_tokens_property_lazy_loading(self, sync_client_wrapper):
- """Test AuthV1Client tokens property lazy loading."""
- client = AuthV1Client(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._tokens is None
-
- # Access triggers lazy loading
- tokens_client = client.tokens
- assert client._tokens is not None
- assert isinstance(tokens_client, TokensClient)
-
- # Subsequent access returns same instance
- assert client.tokens is tokens_client
-
- def test_async_auth_v1_client_tokens_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncAuthV1Client tokens property lazy loading."""
- client = AuthAsyncV1Client(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._tokens is None
-
- # Access triggers lazy loading
- tokens_client = client.tokens
- assert client._tokens is not None
- assert isinstance(tokens_client, AsyncTokensClient)
-
- # Subsequent access returns same instance
- assert client.tokens is tokens_client
-
- def test_auth_v1_client_raw_response_access(self, sync_client_wrapper):
- """Test AuthV1Client raw response access."""
- client = AuthV1Client(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_auth_v1_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncAuthV1Client raw response access."""
- client = AuthAsyncV1Client(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
-
-class TestTokensClient:
- """Test cases for Tokens Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def mock_grant_response(self):
- """Mock grant response data."""
- return {
- "access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...",
- "expires_in": 30
- }
-
- def test_tokens_client_initialization(self, sync_client_wrapper):
- """Test TokensClient initialization."""
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_tokens_client_initialization(self, async_client_wrapper):
- """Test AsyncTokensClient initialization."""
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_tokens_client_raw_response_access(self, sync_client_wrapper):
- """Test TokensClient raw response access."""
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_tokens_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncTokensClient raw response access."""
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.auth.v1.tokens.raw_client.RawTokensClient.grant')
- def test_tokens_client_grant_default_ttl(self, mock_grant, sync_client_wrapper, mock_grant_response):
- """Test TokensClient grant with default TTL."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = GrantV1Response(**mock_grant_response)
- mock_grant.return_value = mock_response
-
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- result = client.grant()
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
- assert result.access_token == mock_grant_response["access_token"]
- assert result.expires_in == mock_grant_response["expires_in"]
-
- # Verify raw client was called with correct parameters
- mock_grant.assert_called_once_with(ttl_seconds=..., request_options=None)
-
- @patch('deepgram.auth.v1.tokens.raw_client.RawTokensClient.grant')
- def test_tokens_client_grant_custom_ttl(self, mock_grant, sync_client_wrapper, mock_grant_response):
- """Test TokensClient grant with custom TTL."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = GrantV1Response(**mock_grant_response)
- mock_grant.return_value = mock_response
-
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- custom_ttl = 60
- result = client.grant(ttl_seconds=custom_ttl)
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
-
- # Verify raw client was called with custom TTL
- mock_grant.assert_called_once_with(ttl_seconds=custom_ttl, request_options=None)
-
- @patch('deepgram.auth.v1.tokens.raw_client.RawTokensClient.grant')
- def test_tokens_client_grant_with_request_options(self, mock_grant, sync_client_wrapper, mock_grant_response):
- """Test TokensClient grant with request options."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = GrantV1Response(**mock_grant_response)
- mock_grant.return_value = mock_response
-
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- request_options = RequestOptions(
- additional_headers={"X-Custom-Header": "test-value"}
- )
- result = client.grant(ttl_seconds=45, request_options=request_options)
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
-
- # Verify raw client was called with request options
- mock_grant.assert_called_once_with(ttl_seconds=45, request_options=request_options)
-
- @patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant')
- @pytest.mark.asyncio
- async def test_async_tokens_client_grant_default_ttl(self, mock_grant, async_client_wrapper, mock_grant_response):
- """Test AsyncTokensClient grant with default TTL."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = GrantV1Response(**mock_grant_response)
- mock_grant.return_value = mock_response
-
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- result = await client.grant()
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
- assert result.access_token == mock_grant_response["access_token"]
- assert result.expires_in == mock_grant_response["expires_in"]
-
- # Verify async raw client was called with correct parameters
- mock_grant.assert_called_once_with(ttl_seconds=..., request_options=None)
-
- @patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant')
- @pytest.mark.asyncio
- async def test_async_tokens_client_grant_custom_ttl(self, mock_grant, async_client_wrapper, mock_grant_response):
- """Test AsyncTokensClient grant with custom TTL."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = GrantV1Response(**mock_grant_response)
- mock_grant.return_value = mock_response
-
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- custom_ttl = 120
- result = await client.grant(ttl_seconds=custom_ttl)
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
-
- # Verify async raw client was called with custom TTL
- mock_grant.assert_called_once_with(ttl_seconds=custom_ttl, request_options=None)
-
- @patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant')
- @pytest.mark.asyncio
- async def test_async_tokens_client_grant_with_request_options(self, mock_grant, async_client_wrapper, mock_grant_response):
- """Test AsyncTokensClient grant with request options."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = GrantV1Response(**mock_grant_response)
- mock_grant.return_value = mock_response
-
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- request_options = RequestOptions(
- additional_headers={"X-Custom-Header": "async-test-value"}
- )
- result = await client.grant(ttl_seconds=90, request_options=request_options)
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
-
- # Verify async raw client was called with request options
- mock_grant.assert_called_once_with(ttl_seconds=90, request_options=request_options)
-
-
-class TestAuthIntegrationScenarios:
- """Test Auth integration scenarios."""
-
- def test_complete_auth_workflow_sync(self, mock_api_key):
- """Test complete Auth workflow using sync client."""
- with patch('deepgram.auth.v1.tokens.raw_client.RawTokensClient.grant') as mock_grant:
- # Mock the response
- mock_response = Mock()
- mock_response.data = GrantV1Response(
- access_token="eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...",
- expires_in=30
- )
- mock_grant.return_value = mock_response
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access nested auth functionality
- result = client.auth.v1.tokens.grant(ttl_seconds=60)
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
- assert result.access_token is not None
- assert result.expires_in == 30
-
- # Verify the call was made
- mock_grant.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_auth_workflow_async(self, mock_api_key):
- """Test complete Auth workflow using async client."""
- with patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant') as mock_grant:
- # Mock the async response
- mock_response = Mock()
- mock_response.data = GrantV1Response(
- access_token="eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...",
- expires_in=60
- )
- mock_grant.return_value = mock_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access nested auth functionality
- result = await client.auth.v1.tokens.grant(ttl_seconds=120)
-
- assert result is not None
- assert isinstance(result, GrantV1Response)
- assert result.access_token is not None
- assert result.expires_in == 60
-
- # Verify the call was made
- mock_grant.assert_called_once()
-
- def test_auth_client_property_isolation(self, mock_api_key):
- """Test that auth clients are properly isolated between instances."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- auth1 = client1.auth
- auth2 = client2.auth
-
- # Verify they are different instances
- assert auth1 is not auth2
- assert auth1._client_wrapper is not auth2._client_wrapper
-
- # Verify nested clients are also different
- tokens1 = auth1.v1.tokens
- tokens2 = auth2.v1.tokens
-
- assert tokens1 is not tokens2
-
- @pytest.mark.asyncio
- async def test_mixed_sync_async_auth_clients(self, mock_api_key):
- """Test mixing sync and async auth clients."""
- sync_client = DeepgramClient(api_key=mock_api_key)
- async_client = AsyncDeepgramClient(api_key=mock_api_key)
-
- sync_auth = sync_client.auth
- async_auth = async_client.auth
-
- # Verify they are different types
- assert type(sync_auth) != type(async_auth)
- assert isinstance(sync_auth, AuthClient)
- assert isinstance(async_auth, AsyncAuthClient)
-
- # Verify nested clients are also different types
- sync_tokens = sync_auth.v1.tokens
- async_tokens = async_auth.v1.tokens
-
- assert type(sync_tokens) != type(async_tokens)
- assert isinstance(sync_tokens, TokensClient)
- assert isinstance(async_tokens, AsyncTokensClient)
-
-
-class TestAuthErrorHandling:
- """Test Auth client error handling."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @patch('deepgram.auth.v1.tokens.raw_client.RawTokensClient.grant')
- def test_tokens_client_api_error_handling(self, mock_grant, sync_client_wrapper):
- """Test TokensClient API error handling."""
- # Mock an API error
- mock_grant.side_effect = ApiError(
- status_code=401,
- headers={},
- body="Invalid API key"
- )
-
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- client.grant()
-
- assert exc_info.value.status_code == 401
- assert "Invalid API key" in str(exc_info.value.body)
-
- @patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant')
- @pytest.mark.asyncio
- async def test_async_tokens_client_api_error_handling(self, mock_grant, async_client_wrapper):
- """Test AsyncTokensClient API error handling."""
- # Mock an API error
- mock_grant.side_effect = ApiError(
- status_code=403,
- headers={},
- body="Insufficient permissions"
- )
-
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- await client.grant()
-
- assert exc_info.value.status_code == 403
- assert "Insufficient permissions" in str(exc_info.value.body)
-
- @patch('deepgram.auth.v1.tokens.raw_client.RawTokensClient.grant')
- def test_tokens_client_network_error_handling(self, mock_grant, sync_client_wrapper):
- """Test TokensClient network error handling."""
- # Mock a network error
- mock_grant.side_effect = httpx.ConnectError("Connection failed")
-
- client = TokensClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(httpx.ConnectError):
- client.grant()
-
- @patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant')
- @pytest.mark.asyncio
- async def test_async_tokens_client_network_error_handling(self, mock_grant, async_client_wrapper):
- """Test AsyncTokensClient network error handling."""
- # Mock a network error
- mock_grant.side_effect = httpx.ConnectError("Async connection failed")
-
- client = AsyncTokensClient(client_wrapper=async_client_wrapper)
-
- with pytest.raises(httpx.ConnectError):
- await client.grant()
-
- def test_client_wrapper_integration(self, sync_client_wrapper):
- """Test integration with client wrapper."""
- client = AuthClient(client_wrapper=sync_client_wrapper)
-
- # Test that client wrapper methods are accessible
- assert hasattr(client._client_wrapper, 'get_environment')
- assert hasattr(client._client_wrapper, 'get_headers')
- assert hasattr(client._client_wrapper, 'api_key')
-
- environment = client._client_wrapper.get_environment()
- headers = client._client_wrapper.get_headers()
- api_key = client._client_wrapper.api_key
-
- assert environment is not None
- assert isinstance(headers, dict)
- assert api_key is not None
diff --git a/tests/integrations/test_base_client.py b/tests/integrations/test_base_client.py
deleted file mode 100644
index c293fb91..00000000
--- a/tests/integrations/test_base_client.py
+++ /dev/null
@@ -1,217 +0,0 @@
-"""Integration tests for BaseClient and AsyncBaseClient."""
-
-import pytest
-from unittest.mock import Mock, patch
-import httpx
-
-from deepgram.base_client import BaseClient, AsyncBaseClient
-from deepgram.environment import DeepgramClientEnvironment
-from deepgram.core.api_error import ApiError
-
-
-class TestBaseClient:
- """Test cases for BaseClient."""
-
- def test_base_client_initialization(self, mock_api_key):
- """Test BaseClient initialization."""
- client = BaseClient(api_key=mock_api_key)
-
- assert client is not None
- assert client._client_wrapper is not None
-
- def test_base_client_initialization_without_api_key(self):
- """Test BaseClient initialization fails without API key."""
- with patch.dict('os.environ', {}, clear=True):
- with pytest.raises(ApiError) as exc_info:
- BaseClient()
-
- assert "api_key" in str(exc_info.value.body).lower()
-
- def test_base_client_with_environment(self, mock_api_key):
- """Test BaseClient with specific environment."""
- client = BaseClient(
- api_key=mock_api_key,
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- assert client is not None
-
- def test_base_client_with_custom_headers(self, mock_api_key):
- """Test BaseClient with custom headers."""
- headers = {"X-Custom-Header": "test-value"}
- client = BaseClient(api_key=mock_api_key, headers=headers)
-
- assert client is not None
-
- def test_base_client_with_timeout(self, mock_api_key):
- """Test BaseClient with custom timeout."""
- client = BaseClient(api_key=mock_api_key, timeout=120.0)
-
- assert client is not None
-
- def test_base_client_with_follow_redirects(self, mock_api_key):
- """Test BaseClient with follow_redirects setting."""
- client = BaseClient(api_key=mock_api_key, follow_redirects=False)
-
- assert client is not None
-
- def test_base_client_with_custom_httpx_client(self, mock_api_key):
- """Test BaseClient with custom httpx client."""
- custom_client = httpx.Client(timeout=30.0)
- client = BaseClient(api_key=mock_api_key, httpx_client=custom_client)
-
- assert client is not None
-
- def test_base_client_property_access(self, mock_api_key):
- """Test BaseClient property access."""
- client = BaseClient(api_key=mock_api_key)
-
- # Test that all properties are accessible
- assert client.agent is not None
- assert client.auth is not None
- assert client.listen is not None
- assert client.manage is not None
- assert client.read is not None
- assert client.self_hosted is not None
- assert client.speak is not None
-
- def test_base_client_timeout_defaulting(self, mock_api_key):
- """Test BaseClient timeout defaulting behavior."""
- # Test with no timeout specified
- client = BaseClient(api_key=mock_api_key)
- assert client is not None
-
- # Test with custom httpx client that has timeout
- custom_client = httpx.Client(timeout=45.0)
- client = BaseClient(api_key=mock_api_key, httpx_client=custom_client)
- assert client is not None
-
-
-class TestAsyncBaseClient:
- """Test cases for AsyncBaseClient."""
-
- def test_async_base_client_initialization(self, mock_api_key):
- """Test AsyncBaseClient initialization."""
- client = AsyncBaseClient(api_key=mock_api_key)
-
- assert client is not None
- assert client._client_wrapper is not None
-
- def test_async_base_client_initialization_without_api_key(self):
- """Test AsyncBaseClient initialization fails without API key."""
- with patch.dict('os.environ', {}, clear=True):
- with pytest.raises(ApiError) as exc_info:
- AsyncBaseClient()
-
- assert "api_key" in str(exc_info.value.body).lower()
-
- def test_async_base_client_with_environment(self, mock_api_key):
- """Test AsyncBaseClient with specific environment."""
- client = AsyncBaseClient(
- api_key=mock_api_key,
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- assert client is not None
-
- def test_async_base_client_with_custom_headers(self, mock_api_key):
- """Test AsyncBaseClient with custom headers."""
- headers = {"X-Custom-Header": "test-value"}
- client = AsyncBaseClient(api_key=mock_api_key, headers=headers)
-
- assert client is not None
-
- def test_async_base_client_with_timeout(self, mock_api_key):
- """Test AsyncBaseClient with custom timeout."""
- client = AsyncBaseClient(api_key=mock_api_key, timeout=120.0)
-
- assert client is not None
-
- def test_async_base_client_with_follow_redirects(self, mock_api_key):
- """Test AsyncBaseClient with follow_redirects setting."""
- client = AsyncBaseClient(api_key=mock_api_key, follow_redirects=False)
-
- assert client is not None
-
- def test_async_base_client_with_custom_httpx_client(self, mock_api_key):
- """Test AsyncBaseClient with custom httpx async client."""
- custom_client = httpx.AsyncClient(timeout=30.0)
- client = AsyncBaseClient(api_key=mock_api_key, httpx_client=custom_client)
-
- assert client is not None
-
- def test_async_base_client_property_access(self, mock_api_key):
- """Test AsyncBaseClient property access."""
- client = AsyncBaseClient(api_key=mock_api_key)
-
- # Test that all properties are accessible
- assert client.agent is not None
- assert client.auth is not None
- assert client.listen is not None
- assert client.manage is not None
- assert client.read is not None
- assert client.self_hosted is not None
- assert client.speak is not None
-
- def test_async_base_client_timeout_defaulting(self, mock_api_key):
- """Test AsyncBaseClient timeout defaulting behavior."""
- # Test with no timeout specified
- client = AsyncBaseClient(api_key=mock_api_key)
- assert client is not None
-
- # Test with custom httpx client that has timeout
- custom_client = httpx.AsyncClient(timeout=45.0)
- client = AsyncBaseClient(api_key=mock_api_key, httpx_client=custom_client)
- assert client is not None
-
-
-class TestBaseClientWrapperIntegration:
- """Test BaseClient integration with client wrapper."""
-
- def test_sync_client_wrapper_creation(self, mock_api_key):
- """Test synchronous client wrapper creation."""
- client = BaseClient(api_key=mock_api_key)
-
- wrapper = client._client_wrapper
- assert wrapper is not None
- assert hasattr(wrapper, 'get_environment')
- assert hasattr(wrapper, 'get_headers')
- assert hasattr(wrapper, 'api_key')
-
- def test_async_client_wrapper_creation(self, mock_api_key):
- """Test asynchronous client wrapper creation."""
- client = AsyncBaseClient(api_key=mock_api_key)
-
- wrapper = client._client_wrapper
- assert wrapper is not None
- assert hasattr(wrapper, 'get_environment')
- assert hasattr(wrapper, 'get_headers')
- assert hasattr(wrapper, 'api_key')
-
- def test_client_wrapper_environment_access(self, mock_api_key):
- """Test client wrapper environment access."""
- client = BaseClient(
- api_key=mock_api_key,
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- environment = client._client_wrapper.get_environment()
- assert environment is not None
- assert hasattr(environment, 'production')
-
- def test_client_wrapper_headers_access(self, mock_api_key):
- """Test client wrapper headers access."""
- custom_headers = {"X-Test-Header": "test-value"}
- client = BaseClient(api_key=mock_api_key, headers=custom_headers)
-
- headers = client._client_wrapper.get_headers()
- assert isinstance(headers, dict)
- assert "X-Test-Header" in headers
- assert headers["X-Test-Header"] == "test-value"
-
- def test_client_wrapper_api_key_access(self, mock_api_key):
- """Test client wrapper API key access."""
- client = BaseClient(api_key=mock_api_key)
-
- api_key = client._client_wrapper.api_key
- assert api_key == mock_api_key
diff --git a/tests/integrations/test_client.py b/tests/integrations/test_client.py
deleted file mode 100644
index db465d93..00000000
--- a/tests/integrations/test_client.py
+++ /dev/null
@@ -1,450 +0,0 @@
-"""Integration tests for DeepgramClient and AsyncDeepgramClient."""
-
-import pytest
-from unittest.mock import Mock, patch, MagicMock
-import uuid
-from typing import Dict, Any
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.base_client import BaseClient, AsyncBaseClient
-from deepgram.environment import DeepgramClientEnvironment
-from deepgram.core.api_error import ApiError
-
-
-class TestDeepgramClient:
- """Test cases for DeepgramClient (synchronous)."""
-
- def test_client_initialization_with_api_key(self, mock_api_key):
- """Test client initialization with API key."""
- client = DeepgramClient(api_key=mock_api_key)
-
- assert client is not None
- assert isinstance(client, BaseClient)
- assert hasattr(client, 'session_id')
- assert isinstance(client.session_id, str)
-
- # Verify UUID format
- try:
- uuid.UUID(client.session_id)
- except ValueError:
- pytest.fail("session_id should be a valid UUID")
-
- def test_client_initialization_with_access_token(self, mock_access_token):
- """Test client initialization with access token."""
- client = DeepgramClient(access_token=mock_access_token)
-
- assert client is not None
- assert isinstance(client, BaseClient)
- assert hasattr(client, 'session_id')
-
- def test_client_initialization_with_env_var(self, mock_env_vars, mock_api_key):
- """Test client initialization using environment variable simulation."""
- # Since environment variable mocking is complex, test with direct API key
- # This still validates the client initialization path
- client = DeepgramClient(api_key=mock_api_key)
-
- assert client is not None
- assert isinstance(client, BaseClient)
-
- def test_client_initialization_with_custom_headers(self, mock_api_key):
- """Test client initialization with custom headers."""
- custom_headers = {"X-Custom-Header": "test-value"}
- client = DeepgramClient(api_key=mock_api_key, headers=custom_headers)
-
- assert client is not None
- assert isinstance(client, BaseClient)
-
- def test_client_initialization_with_environment(self, mock_api_key):
- """Test client initialization with specific environment."""
- client = DeepgramClient(
- api_key=mock_api_key,
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- assert client is not None
- assert isinstance(client, BaseClient)
-
- def test_client_initialization_without_credentials(self):
- """Test client initialization fails without credentials."""
- with patch.dict('os.environ', {}, clear=True):
- with pytest.raises(ApiError) as exc_info:
- DeepgramClient()
-
- assert "api_key" in str(exc_info.value.body).lower()
-
- def test_client_properties_lazy_loading(self, mock_api_key):
- """Test that client properties are lazily loaded."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Initially, properties should be None
- assert client._agent is None
- assert client._auth is None
- assert client._listen is None
- assert client._manage is None
- assert client._read is None
- assert client._self_hosted is None
- assert client._speak is None
-
- # Access properties to trigger lazy loading
- agent = client.agent
- auth = client.auth
- listen = client.listen
- manage = client.manage
- read = client.read
- self_hosted = client.self_hosted
- speak = client.speak
-
- # Properties should now be loaded
- assert client._agent is not None
- assert client._auth is not None
- assert client._listen is not None
- assert client._manage is not None
- assert client._read is not None
- assert client._self_hosted is not None
- assert client._speak is not None
-
- # Subsequent access should return the same instances
- assert client.agent is agent
- assert client.auth is auth
- assert client.listen is listen
- assert client.manage is manage
- assert client.read is read
- assert client.self_hosted is self_hosted
- assert client.speak is speak
-
- @patch('deepgram.client._setup_telemetry')
- def test_client_telemetry_setup(self, mock_setup_telemetry, mock_api_key):
- """Test that telemetry is properly set up."""
- mock_setup_telemetry.return_value = Mock()
-
- client = DeepgramClient(
- api_key=mock_api_key,
- telemetry_opt_out=False
- )
-
- mock_setup_telemetry.assert_called_once()
- assert hasattr(client, '_telemetry_handler')
-
- def test_client_telemetry_opt_out(self, mock_api_key):
- """Test that telemetry can be opted out."""
- client = DeepgramClient(
- api_key=mock_api_key,
- telemetry_opt_out=True
- )
-
- assert client._telemetry_handler is None
-
- @patch('deepgram.client._apply_bearer_authorization_override')
- def test_client_bearer_token_override(self, mock_apply_bearer, mock_access_token, mock_api_key):
- """Test that bearer token authorization is properly applied."""
- client = DeepgramClient(access_token=mock_access_token)
-
- mock_apply_bearer.assert_called_once_with(
- client._client_wrapper,
- mock_access_token
- )
-
- def test_client_session_id_in_headers(self, mock_api_key):
- """Test that session ID is added to headers."""
- client = DeepgramClient(api_key=mock_api_key)
-
- headers = client._client_wrapper.get_headers()
- assert "x-deepgram-session-id" in headers
- assert headers["x-deepgram-session-id"] == client.session_id
-
- def test_client_with_custom_httpx_client(self, mock_api_key):
- """Test client initialization with custom httpx client."""
- import httpx
- custom_client = httpx.Client(timeout=30.0)
-
- client = DeepgramClient(
- api_key=mock_api_key,
- httpx_client=custom_client
- )
-
- assert client is not None
- assert isinstance(client, BaseClient)
-
- def test_client_timeout_configuration(self, mock_api_key):
- """Test client timeout configuration."""
- client = DeepgramClient(
- api_key=mock_api_key,
- timeout=120.0
- )
-
- assert client is not None
- assert isinstance(client, BaseClient)
-
- def test_client_follow_redirects_configuration(self, mock_api_key):
- """Test client redirect configuration."""
- client = DeepgramClient(
- api_key=mock_api_key,
- follow_redirects=False
- )
-
- assert client is not None
- assert isinstance(client, BaseClient)
-
-
-class TestAsyncDeepgramClient:
- """Test cases for AsyncDeepgramClient (asynchronous)."""
-
- def test_async_client_initialization_with_api_key(self, mock_api_key):
- """Test async client initialization with API key."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- assert client is not None
- assert isinstance(client, AsyncBaseClient)
- assert hasattr(client, 'session_id')
- assert isinstance(client.session_id, str)
-
- # Verify UUID format
- try:
- uuid.UUID(client.session_id)
- except ValueError:
- pytest.fail("session_id should be a valid UUID")
-
- def test_async_client_initialization_with_access_token(self, mock_access_token):
- """Test async client initialization with access token."""
- client = AsyncDeepgramClient(access_token=mock_access_token)
-
- assert client is not None
- assert isinstance(client, AsyncBaseClient)
- assert hasattr(client, 'session_id')
-
- def test_async_client_initialization_with_env_var(self, mock_env_vars, mock_api_key):
- """Test async client initialization using environment variable simulation."""
- # Since environment variable mocking is complex, test with direct API key
- # This still validates the async client initialization path
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- assert client is not None
- assert isinstance(client, AsyncBaseClient)
-
- def test_async_client_initialization_without_credentials(self):
- """Test async client initialization fails without credentials."""
- with patch.dict('os.environ', {}, clear=True):
- with pytest.raises(ApiError) as exc_info:
- AsyncDeepgramClient()
-
- assert "api_key" in str(exc_info.value.body).lower()
-
- def test_async_client_properties_lazy_loading(self, mock_api_key):
- """Test that async client properties are lazily loaded."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Initially, properties should be None
- assert client._agent is None
- assert client._auth is None
- assert client._listen is None
- assert client._manage is None
- assert client._read is None
- assert client._self_hosted is None
- assert client._speak is None
-
- # Access properties to trigger lazy loading
- agent = client.agent
- auth = client.auth
- listen = client.listen
- manage = client.manage
- read = client.read
- self_hosted = client.self_hosted
- speak = client.speak
-
- # Properties should now be loaded
- assert client._agent is not None
- assert client._auth is not None
- assert client._listen is not None
- assert client._manage is not None
- assert client._read is not None
- assert client._self_hosted is not None
- assert client._speak is not None
-
- # Subsequent access should return the same instances
- assert client.agent is agent
- assert client.auth is auth
- assert client.listen is listen
- assert client.manage is manage
- assert client.read is read
- assert client.self_hosted is self_hosted
- assert client.speak is speak
-
- @patch('deepgram.client._setup_async_telemetry')
- def test_async_client_telemetry_setup(self, mock_setup_telemetry, mock_api_key):
- """Test that async telemetry is properly set up."""
- mock_setup_telemetry.return_value = Mock()
-
- client = AsyncDeepgramClient(
- api_key=mock_api_key,
- telemetry_opt_out=False
- )
-
- mock_setup_telemetry.assert_called_once()
- assert hasattr(client, '_telemetry_handler')
-
- def test_async_client_telemetry_opt_out(self, mock_api_key):
- """Test that async telemetry can be opted out."""
- client = AsyncDeepgramClient(
- api_key=mock_api_key,
- telemetry_opt_out=True
- )
-
- assert client._telemetry_handler is None
-
- @patch('deepgram.client._apply_bearer_authorization_override')
- def test_async_client_bearer_token_override(self, mock_apply_bearer, mock_access_token):
- """Test that bearer token authorization is properly applied for async client."""
- client = AsyncDeepgramClient(access_token=mock_access_token)
-
- mock_apply_bearer.assert_called_once_with(
- client._client_wrapper,
- mock_access_token
- )
-
- def test_async_client_session_id_in_headers(self, mock_api_key):
- """Test that session ID is added to headers for async client."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- headers = client._client_wrapper.get_headers()
- assert "x-deepgram-session-id" in headers
- assert headers["x-deepgram-session-id"] == client.session_id
-
- def test_async_client_with_custom_httpx_client(self, mock_api_key):
- """Test async client initialization with custom httpx client."""
- import httpx
- custom_client = httpx.AsyncClient(timeout=30.0)
-
- client = AsyncDeepgramClient(
- api_key=mock_api_key,
- httpx_client=custom_client
- )
-
- assert client is not None
- assert isinstance(client, AsyncBaseClient)
-
- def test_async_client_timeout_configuration(self, mock_api_key):
- """Test async client timeout configuration."""
- client = AsyncDeepgramClient(
- api_key=mock_api_key,
- timeout=120.0
- )
-
- assert client is not None
- assert isinstance(client, AsyncBaseClient)
-
- def test_async_client_follow_redirects_configuration(self, mock_api_key):
- """Test async client redirect configuration."""
- client = AsyncDeepgramClient(
- api_key=mock_api_key,
- follow_redirects=False
- )
-
- assert client is not None
- assert isinstance(client, AsyncBaseClient)
-
-
-class TestClientUtilityFunctions:
- """Test utility functions used by clients."""
-
- def test_create_telemetry_context(self):
- """Test telemetry context creation."""
- from deepgram.client import _create_telemetry_context
-
- with patch('deepgram.client.sys.version', '3.9.0 (default, Oct 9 2020, 15:07:18)'), \
- patch('deepgram.client.platform.system', return_value='Linux'), \
- patch('deepgram.client.platform.machine', return_value='x86_64'):
-
- session_id = str(uuid.uuid4())
- context = _create_telemetry_context(session_id)
-
- assert context["package_name"] == "python-sdk"
- assert context["language"] == "python"
- assert context["runtime_version"] == "python 3.9.0"
- assert context["os"] == "linux"
- assert context["arch"] == "x86_64"
- assert context["session_id"] == session_id
- assert "package_version" in context
- assert "environment" in context
-
- def test_create_telemetry_context_fallback(self):
- """Test telemetry context creation with fallback."""
- from deepgram.client import _create_telemetry_context
-
- with patch('deepgram.client.sys.version', side_effect=Exception("Test error")):
- session_id = str(uuid.uuid4())
- context = _create_telemetry_context(session_id)
-
- assert context["package_name"] == "python-sdk"
- assert context["language"] == "python"
- assert context["session_id"] == session_id
-
- def test_setup_telemetry(self, mock_api_key):
- """Test telemetry setup."""
- from deepgram.client import _setup_telemetry
- from deepgram.core.client_wrapper import SyncClientWrapper
-
- with patch('deepgram.extensions.telemetry.batching_handler.BatchingTelemetryHandler') as mock_handler_class:
- mock_handler = Mock()
- mock_handler_class.return_value = mock_handler
-
- client_wrapper = SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- session_id = str(uuid.uuid4())
- result = _setup_telemetry(
- session_id=session_id,
- telemetry_opt_out=False,
- telemetry_handler=None,
- client_wrapper=client_wrapper
- )
-
- assert result is not None # The actual handler is created, not the mock
- # The handler class may not be called directly due to internal implementation
- # Just verify that a result was returned
-
- def test_setup_telemetry_opt_out(self, mock_api_key):
- """Test telemetry setup with opt-out."""
- from deepgram.client import _setup_telemetry
- from deepgram.core.client_wrapper import SyncClientWrapper
-
- client_wrapper = SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- session_id = str(uuid.uuid4())
- result = _setup_telemetry(
- session_id=session_id,
- telemetry_opt_out=True,
- telemetry_handler=None,
- client_wrapper=client_wrapper
- )
-
- assert result is None
-
- def test_apply_bearer_authorization_override(self, mock_api_key):
- """Test bearer authorization override."""
- from deepgram.client import _apply_bearer_authorization_override
- from deepgram.core.client_wrapper import SyncClientWrapper
-
- client_wrapper = SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- bearer_token = "test_bearer_token"
- _apply_bearer_authorization_override(client_wrapper, bearer_token)
-
- headers = client_wrapper.get_headers()
- assert headers["Authorization"] == f"bearer {bearer_token}"
diff --git a/tests/integrations/test_integration_scenarios.py b/tests/integrations/test_integration_scenarios.py
deleted file mode 100644
index fc442d66..00000000
--- a/tests/integrations/test_integration_scenarios.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""End-to-end integration test scenarios across multiple products."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch, MagicMock
-import json
-import asyncio
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.events import EventType
-
-
-class TestMultiProductIntegrationScenarios:
- """Test integration scenarios that span multiple Deepgram products."""
-
- @patch('deepgram.listen.v1.socket_client.V1SocketClient._handle_json_message')
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_listen_to_speak_workflow(self, mock_websocket_connect, mock_handle_json, mock_api_key, sample_audio_data, sample_text):
- """Test workflow from Listen transcription to Speak TTS."""
- # Mock Listen websocket connection
- mock_listen_ws = Mock()
- mock_listen_ws.send = Mock()
- mock_listen_ws.recv = Mock(side_effect=[
- '{"type": "Results", "channel": {"alternatives": [{"transcript": "Hello world"}]}}'
- ])
- mock_listen_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Results", "channel": {"alternatives": [{"transcript": "Hello world"}]}}'
- ]))
- mock_listen_ws.__enter__ = Mock(return_value=mock_listen_ws)
- mock_listen_ws.__exit__ = Mock(return_value=None)
-
- # Mock Speak websocket connection
- mock_speak_ws = Mock()
- mock_speak_ws.send = Mock()
- mock_speak_ws.recv = Mock(side_effect=[b'\x00\x01\x02\x03']) # Audio chunk
- mock_speak_ws.__iter__ = Mock(return_value=iter([b'\x00\x01\x02\x03']))
- mock_speak_ws.__enter__ = Mock(return_value=mock_speak_ws)
- mock_speak_ws.__exit__ = Mock(return_value=None)
-
- # Alternate between Listen and Speak connections
- mock_websocket_connect.side_effect = [mock_listen_ws, mock_speak_ws]
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.return_value = {"type": "Results", "channel": {"alternatives": [{"transcript": "Hello world"}]}}
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Step 1: Transcribe audio with Listen
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as listen_conn:
- listen_conn.send_media(sample_audio_data)
- transcription_result = listen_conn.recv()
- assert transcription_result is not None
-
- # Step 2: Generate speech from transcription with Speak
- with client.speak.v1.with_raw_response.connect(model="aura-asteria-en") as speak_conn:
- speak_conn.send_text(Mock()) # Would use transcription text
- audio_result = speak_conn.recv()
- assert audio_result is not None
-
- # Verify both connections were established
- assert mock_websocket_connect.call_count == 2
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_agent_with_listen_speak_integration(self, mock_websocket_connect, mock_api_key, sample_audio_data):
- """Test Agent integration with Listen and Speak capabilities."""
- # Mock Agent websocket connection
- mock_agent_ws = Mock()
- mock_agent_ws.send = Mock()
- mock_agent_ws.recv = Mock(side_effect=[
- '{"type": "Welcome", "request_id": "agent-123"}',
- '{"type": "ConversationText", "role": "assistant", "content": "How can I help you?"}',
- b'\x00\x01\x02\x03' # Generated speech audio
- ])
- mock_agent_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Welcome", "request_id": "agent-123"}',
- '{"type": "ConversationText", "role": "assistant", "content": "How can I help you?"}',
- b'\x00\x01\x02\x03'
- ]))
- mock_agent_ws.__enter__ = Mock(return_value=mock_agent_ws)
- mock_agent_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_agent_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Connect to Agent (which internally uses Listen and Speak)
- with client.agent.v1.with_raw_response.connect() as agent_conn:
- # Send initial settings
- agent_conn.send_settings(Mock())
-
- # Send user audio (Listen functionality)
- agent_conn.send_media(sample_audio_data)
-
- # Receive welcome message
- welcome = agent_conn.recv()
- assert welcome is not None
-
- # Receive conversation response
- response = agent_conn.recv()
- assert response is not None
-
- # Receive generated audio (Speak functionality)
- audio = agent_conn.recv()
- assert audio is not None
-
- # Verify connection was established
- mock_websocket_connect.assert_called_once()
-
- def test_multi_client_concurrent_usage(self, mock_api_key):
- """Test concurrent usage of multiple product clients."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access multiple product clients concurrently
- listen_client = client.listen
- speak_client = client.speak
- agent_client = client.agent
- auth_client = client.auth
- manage_client = client.manage
- read_client = client.read
- self_hosted_client = client.self_hosted
-
- # Verify all clients are properly initialized
- assert listen_client is not None
- assert speak_client is not None
- assert agent_client is not None
- assert auth_client is not None
- assert manage_client is not None
- assert read_client is not None
- assert self_hosted_client is not None
-
- # Verify they're all different instances
- clients = [listen_client, speak_client, agent_client, auth_client,
- manage_client, read_client, self_hosted_client]
- for i, client1 in enumerate(clients):
- for j, client2 in enumerate(clients):
- if i != j:
- assert client1 is not client2
-
- @pytest.mark.asyncio
- async def test_async_multi_product_workflow(self, mock_api_key):
- """Test async workflow across multiple products."""
- with patch('deepgram.auth.v1.tokens.raw_client.AsyncRawTokensClient.grant') as mock_grant, \
- patch('deepgram.read.v1.text.raw_client.AsyncRawTextClient.analyze') as mock_analyze:
-
- # Mock auth token generation
- from deepgram.types.grant_v1response import GrantV1Response
- mock_auth_response = Mock()
- mock_auth_response.data = GrantV1Response(access_token="temp_token", expires_in=3600)
- mock_grant.return_value = mock_auth_response
-
- # Mock text analysis
- from deepgram.types.read_v1response import ReadV1Response
- from deepgram.types.read_v1response_metadata import ReadV1ResponseMetadata
- from deepgram.types.read_v1response_results import ReadV1ResponseResults
- mock_read_response = Mock()
- mock_read_response.data = ReadV1Response(
- metadata=ReadV1ResponseMetadata(),
- results=ReadV1ResponseResults()
- )
- mock_analyze.return_value = mock_read_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Step 1: Generate temporary token
- token_result = await client.auth.v1.tokens.grant(ttl_seconds=3600)
- assert token_result is not None
- assert isinstance(token_result, GrantV1Response)
-
- # Step 2: Analyze text
- from deepgram.requests.read_v1request_text import ReadV1RequestTextParams
- text_request = ReadV1RequestTextParams(text="Sample text for analysis")
- analysis_result = await client.read.v1.text.analyze(
- request=text_request,
- sentiment=True,
- topics=True
- )
- assert analysis_result is not None
- assert isinstance(analysis_result, ReadV1Response)
-
- # Verify both calls were made
- mock_grant.assert_called_once()
- mock_analyze.assert_called_once()
-
- def test_client_isolation_across_products(self, mock_api_key):
- """Test that product clients maintain proper isolation."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- # Verify top-level product clients are isolated
- assert client1.listen is not client2.listen
- assert client1.speak is not client2.speak
- assert client1.agent is not client2.agent
- assert client1.auth is not client2.auth
- assert client1.manage is not client2.manage
- assert client1.read is not client2.read
- assert client1.self_hosted is not client2.self_hosted
-
- # Verify nested clients are also isolated
- assert client1.listen.v1 is not client2.listen.v1
- assert client1.speak.v1 is not client2.speak.v1
- assert client1.agent.v1 is not client2.agent.v1
- assert client1.auth.v1 is not client2.auth.v1
- assert client1.manage.v1 is not client2.manage.v1
- assert client1.read.v1 is not client2.read.v1
- assert client1.self_hosted.v1 is not client2.self_hosted.v1
-
- @pytest.mark.asyncio
- async def test_mixed_sync_async_multi_product(self, mock_api_key):
- """Test mixing synchronous and asynchronous clients across products."""
- sync_client = DeepgramClient(api_key=mock_api_key)
- async_client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Verify sync and async clients are different types
- assert type(sync_client.listen) != type(async_client.listen)
- assert type(sync_client.speak) != type(async_client.speak)
- assert type(sync_client.agent) != type(async_client.agent)
- assert type(sync_client.auth) != type(async_client.auth)
- assert type(sync_client.manage) != type(async_client.manage)
- assert type(sync_client.read) != type(async_client.read)
- assert type(sync_client.self_hosted) != type(async_client.self_hosted)
-
- # Verify nested clients are also different types
- assert type(sync_client.listen.v1) != type(async_client.listen.v1)
- assert type(sync_client.speak.v1) != type(async_client.speak.v1)
- assert type(sync_client.agent.v1) != type(async_client.agent.v1)
- assert type(sync_client.auth.v1) != type(async_client.auth.v1)
- assert type(sync_client.manage.v1) != type(async_client.manage.v1)
- assert type(sync_client.read.v1) != type(async_client.read.v1)
- assert type(sync_client.self_hosted.v1) != type(async_client.self_hosted.v1)
-
-
-class TestErrorHandlingScenarios:
- """Test error handling across integration scenarios."""
-
- def test_connection_failure_handling(self, mock_api_key):
- """Test connection failure handling."""
- with patch('websockets.sync.client.connect') as mock_connect:
- mock_connect.side_effect = ConnectionError("Network unavailable")
-
- client = DeepgramClient(api_key=mock_api_key)
-
- # Test that connection failures are properly handled across products
- with pytest.raises(ConnectionError):
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- pass
-
- with pytest.raises(ConnectionError):
- with client.speak.v1.with_raw_response.connect() as connection:
- pass
-
- with pytest.raises(ConnectionError):
- with client.agent.v1.with_raw_response.connect() as connection:
- pass
-
- def test_message_processing_error_handling(self, mock_api_key):
- """Test message processing error handling."""
- with patch('websockets.sync.client.connect') as mock_connect:
- # Mock websocket that sends invalid JSON
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock(side_effect=['{"invalid": json}'])
- mock_ws.__iter__ = Mock(return_value=iter(['{"invalid": json}']))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_connect.return_value = mock_ws
-
- client = DeepgramClient(api_key=mock_api_key)
-
- # Test that invalid JSON raises JSONDecodeError
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- with pytest.raises(json.JSONDecodeError):
- connection.recv()
-
- @pytest.mark.asyncio
- async def test_async_connection_failure_handling(self, mock_api_key):
- """Test async connection failure handling."""
- with patch('deepgram.listen.v1.raw_client.websockets_client_connect') as mock_connect:
- mock_connect.side_effect = ConnectionError("Async network unavailable")
-
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Test that async connection failures are properly handled
- with pytest.raises(ConnectionError):
- async with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- pass
\ No newline at end of file
diff --git a/tests/integrations/test_listen_client.py b/tests/integrations/test_listen_client.py
deleted file mode 100644
index 34a22d26..00000000
--- a/tests/integrations/test_listen_client.py
+++ /dev/null
@@ -1,1226 +0,0 @@
-"""Integration tests for Listen client implementations."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch, MagicMock
-from contextlib import contextmanager, asynccontextmanager
-import httpx
-import websockets.exceptions
-import json
-import asyncio
-from json.decoder import JSONDecodeError
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.core.events import EventType
-from deepgram.environment import DeepgramClientEnvironment
-
-# Import Listen clients
-from deepgram.listen.client import ListenClient, AsyncListenClient
-from deepgram.listen.v1.client import V1Client as ListenV1Client, AsyncV1Client as ListenAsyncV1Client
-from deepgram.listen.v2.client import V2Client as ListenV2Client, AsyncV2Client as ListenAsyncV2Client
-
-# Import Listen raw clients
-from deepgram.listen.v1.raw_client import RawV1Client as ListenRawV1Client, AsyncRawV1Client as ListenAsyncRawV1Client
-from deepgram.listen.v2.raw_client import RawV2Client as ListenRawV2Client, AsyncRawV2Client as ListenAsyncRawV2Client
-
-# Import Listen socket clients
-from deepgram.listen.v1.socket_client import V1SocketClient as ListenV1SocketClient, AsyncV1SocketClient as ListenAsyncV1SocketClient
-from deepgram.listen.v2.socket_client import V2SocketClient as ListenV2SocketClient, AsyncV2SocketClient as ListenAsyncV2SocketClient
-
-# Import Listen media clients
-from deepgram.listen.v1.media.client import MediaClient, AsyncMediaClient
-
-# Import socket message types
-from deepgram.extensions.types.sockets import (
- ListenV1ControlMessage,
- ListenV1MediaMessage,
- ListenV2ControlMessage,
- ListenV2MediaMessage,
-)
-
-# Import request and response types for mocking
-from deepgram.types.listen_v1response import ListenV1Response
-from deepgram.listen.v1.media.types.media_transcribe_request_callback_method import MediaTranscribeRequestCallbackMethod
-from deepgram.listen.v1.media.types.media_transcribe_request_summarize import MediaTranscribeRequestSummarize
-from deepgram.listen.v1.media.types.media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode
-from deepgram.listen.v1.media.types.media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode
-from deepgram.listen.v1.media.types.media_transcribe_request_encoding import MediaTranscribeRequestEncoding
-from deepgram.listen.v1.media.types.media_transcribe_request_model import MediaTranscribeRequestModel
-from deepgram.listen.v1.media.types.media_transcribe_request_version import MediaTranscribeRequestVersion
-
-
-class TestListenClient:
- """Test cases for Listen Client."""
-
- def test_listen_client_initialization(self, mock_api_key):
- """Test ListenClient initialization."""
- client = DeepgramClient(api_key=mock_api_key).listen
- assert client is not None
- assert hasattr(client, 'v1')
- assert hasattr(client, 'v2')
-
- def test_async_listen_client_initialization(self, mock_api_key):
- """Test AsyncListenClient initialization."""
- client = AsyncDeepgramClient(api_key=mock_api_key).listen
- assert client is not None
- assert hasattr(client, 'v1')
- assert hasattr(client, 'v2')
-
- def test_listen_client_with_raw_response(self, mock_api_key):
- """Test ListenClient with_raw_response property."""
- client = DeepgramClient(api_key=mock_api_key).listen
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert hasattr(raw_client, '_client_wrapper')
-
- def test_async_listen_client_with_raw_response(self, mock_api_key):
- """Test AsyncListenClient with_raw_response property."""
- client = AsyncDeepgramClient(api_key=mock_api_key).listen
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert hasattr(raw_client, '_client_wrapper')
-
-
-class TestListenRawV1Client:
- """Test cases for Listen V1 Raw Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_sync_raw_client_initialization(self, sync_client_wrapper):
- """Test synchronous raw client initialization."""
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
-
- def test_async_raw_client_initialization(self, async_client_wrapper):
- """Test asynchronous raw client initialization."""
- client = ListenAsyncRawV1Client(client_wrapper=async_client_wrapper)
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_sync_connect_success(self, mock_websocket_connect, sync_client_wrapper, mock_websocket):
- """Test successful synchronous WebSocket connection."""
- mock_websocket_connect.return_value.__enter__ = Mock(return_value=mock_websocket)
- mock_websocket_connect.return_value.__exit__ = Mock(return_value=None)
-
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- with client.connect(model="nova-2-general") as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_sync_connect_with_all_parameters(self, mock_websocket_connect, sync_client_wrapper, mock_websocket):
- """Test synchronous connection with all parameters."""
- mock_websocket_connect.return_value.__enter__ = Mock(return_value=mock_websocket)
- mock_websocket_connect.return_value.__exit__ = Mock(return_value=None)
-
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- with client.connect(
- model="nova-2-general",
- encoding="linear16",
- sample_rate="16000",
- channels="1",
- language="en-US",
- punctuate="true",
- smart_format="true",
- diarize="true",
- interim_results="true",
- utterance_end_ms="1000",
- vad_events="true",
- authorization="Bearer test_token"
- ) as connection:
- assert connection is not None
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_sync_connect_invalid_credentials(self, mock_websocket_connect, sync_client_wrapper):
- """Test synchronous connection with invalid credentials."""
- mock_websocket_connect.side_effect = websockets.exceptions.InvalidStatusCode(
- status_code=401, headers={}
- )
-
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- with client.connect(model="nova-2-general") as connection:
- pass
-
- assert exc_info.value.status_code == 401
- assert "invalid credentials" in exc_info.value.body.lower()
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_sync_connect_unexpected_error(self, mock_websocket_connect, sync_client_wrapper):
- """Test synchronous connection with unexpected error."""
- mock_websocket_connect.side_effect = Exception("Unexpected connection error")
-
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(Exception) as exc_info:
- with client.connect(model="nova-2-general") as connection:
- pass
-
- assert "Unexpected connection error" in str(exc_info.value)
-
- @patch('deepgram.listen.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_connect_success(self, mock_websocket_connect, async_client_wrapper, mock_async_websocket):
- """Test successful asynchronous WebSocket connection."""
- mock_websocket_connect.return_value.__aenter__ = AsyncMock(return_value=mock_async_websocket)
- mock_websocket_connect.return_value.__aexit__ = AsyncMock(return_value=None)
-
- client = ListenAsyncRawV1Client(client_wrapper=async_client_wrapper)
-
- async with client.connect(model="nova-2-general") as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
- @patch('deepgram.listen.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_connect_with_all_parameters(self, mock_websocket_connect, async_client_wrapper, mock_async_websocket):
- """Test asynchronous connection with all parameters."""
- mock_websocket_connect.return_value.__aenter__ = AsyncMock(return_value=mock_async_websocket)
- mock_websocket_connect.return_value.__aexit__ = AsyncMock(return_value=None)
-
- client = ListenAsyncRawV1Client(client_wrapper=async_client_wrapper)
-
- async with client.connect(
- model="nova-2-general",
- encoding="linear16",
- sample_rate="16000",
- channels="1"
- ) as connection:
- assert connection is not None
-
- @patch('deepgram.listen.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_connect_invalid_credentials(self, mock_websocket_connect, async_client_wrapper):
- """Test asynchronous connection with invalid credentials."""
- mock_websocket_connect.side_effect = websockets.exceptions.InvalidStatusCode(
- status_code=401, headers={}
- )
-
- client = ListenAsyncRawV1Client(client_wrapper=async_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- async with client.connect(model="nova-2-general") as connection:
- pass
-
- assert exc_info.value.status_code == 401
- assert "invalid credentials" in exc_info.value.body.lower()
-
- def test_sync_query_params_construction(self, sync_client_wrapper):
- """Test query parameters are properly constructed."""
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- # Mock the websocket connection to capture the URL
- with patch('websockets.sync.client.connect') as mock_connect:
- mock_connect.return_value.__enter__ = Mock(return_value=Mock())
- mock_connect.return_value.__exit__ = Mock(return_value=None)
-
- try:
- with client.connect(
- model="nova-2-general",
- encoding="linear16",
- sample_rate="16000",
- punctuate="true"
- ) as connection:
- pass
- except:
- pass # We just want to check the URL construction
-
- # Verify the URL was constructed with query parameters
- call_args = mock_connect.call_args
- if call_args and len(call_args[0]) > 0:
- url = call_args[0][0]
- assert "model=nova-2-general" in url
- assert "encoding=linear16" in url
- assert "sample_rate=16000" in url
- assert "punctuate=true" in url
-
- def test_sync_headers_construction(self, sync_client_wrapper):
- """Test headers are properly constructed."""
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- # Mock the websocket connection to capture headers
- with patch('websockets.sync.client.connect') as mock_connect:
- mock_connect.return_value.__enter__ = Mock(return_value=Mock())
- mock_connect.return_value.__exit__ = Mock(return_value=None)
-
- try:
- with client.connect(
- model="nova-2-general",
- authorization="Bearer custom_token"
- ) as connection:
- pass
- except:
- pass # We just want to check the headers construction
-
- # Verify headers were passed
- call_args = mock_connect.call_args
- if call_args and 'additional_headers' in call_args[1]:
- headers = call_args[1]['additional_headers']
- assert 'Authorization' in headers
-
- def test_sync_request_options(self, sync_client_wrapper):
- """Test request options are properly handled."""
- client = ListenRawV1Client(client_wrapper=sync_client_wrapper)
-
- request_options = RequestOptions(
- additional_headers={"Custom-Header": "custom-value"},
- timeout_in_seconds=30.0
- )
-
- with patch('websockets.sync.client.connect') as mock_connect:
- mock_connect.return_value.__enter__ = Mock(return_value=Mock())
- mock_connect.return_value.__exit__ = Mock(return_value=None)
-
- try:
- with client.connect(
- model="nova-2-general",
- request_options=request_options
- ) as connection:
- pass
- except:
- pass # We just want to check the options handling
-
- # Verify request options were applied
- call_args = mock_connect.call_args
- assert call_args is not None
-
-
-class TestListenRawV2Client:
- """Test cases for Listen V2 Raw Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- def test_sync_raw_v2_client_initialization(self, sync_client_wrapper):
- """Test synchronous raw V2 client initialization."""
- client = ListenRawV2Client(client_wrapper=sync_client_wrapper)
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
-
- @patch('deepgram.listen.v2.raw_client.websockets_sync_client.connect')
- def test_sync_v2_connect_success(self, mock_websocket_connect, sync_client_wrapper, mock_websocket):
- """Test successful V2 synchronous WebSocket connection."""
- mock_websocket_connect.return_value.__enter__ = Mock(return_value=mock_websocket)
- mock_websocket_connect.return_value.__exit__ = Mock(return_value=None)
-
- client = ListenRawV2Client(client_wrapper=sync_client_wrapper)
-
- with client.connect(model="nova-2-general", encoding="linear16", sample_rate="16000") as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
-
-class TestListenV1SocketClient:
- """Test cases for Listen V1 Socket Client."""
-
- @pytest.fixture
- def mock_sync_websocket(self):
- """Create a mock synchronous websocket."""
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock()
- mock_ws.__iter__ = Mock(return_value=iter([]))
- return mock_ws
-
- @pytest.fixture
- def mock_async_websocket(self):
- """Create a mock asynchronous websocket."""
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
- mock_ws.recv = AsyncMock()
- mock_ws.__aiter__ = AsyncMock(return_value=iter([]))
- return mock_ws
-
- def test_sync_socket_client_initialization(self, mock_sync_websocket):
- """Test synchronous socket client initialization."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
- assert client is not None
- assert client._websocket is mock_sync_websocket
-
- def test_async_socket_client_initialization(self, mock_async_websocket):
- """Test asynchronous socket client initialization."""
- client = ListenAsyncV1SocketClient(websocket=mock_async_websocket)
- assert client is not None
- assert client._websocket is mock_async_websocket
-
- def test_is_binary_message_detection(self, mock_sync_websocket):
- """Test binary message detection."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- # Test with bytes
- assert client._is_binary_message(b'binary data') is True
-
- # Test with bytearray
- assert client._is_binary_message(bytearray(b'binary data')) is True
-
- # Test with string
- assert client._is_binary_message('text data') is False
-
- # Test with dict
- assert client._is_binary_message({'key': 'value'}) is False
-
- def test_handle_binary_message(self, mock_sync_websocket, sample_audio_data):
- """Test binary message handling."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- # Test handling binary audio data
- result = client._handle_binary_message(sample_audio_data)
- assert result == sample_audio_data
-
- def test_handle_json_message_success(self, mock_sync_websocket):
- """Test successful JSON message handling."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- json_message = '{"type": "Metadata", "request_id": "test-123", "sha256": "abc123", "created": "2023-01-01T00:00:00Z", "duration": 5.0, "channels": 1}'
- result = client._handle_json_message(json_message)
-
- assert result is not None
- assert result.type == "Metadata"
- assert result.request_id == "test-123"
- assert result.sha256 == "abc123"
-
- def test_handle_json_message_invalid(self, mock_sync_websocket):
- """Test invalid JSON message handling."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- invalid_json = '{"invalid": json}'
-
- # Should raise JSONDecodeError for invalid JSON
- with pytest.raises(json.JSONDecodeError):
- client._handle_json_message(invalid_json)
-
- @patch('deepgram.listen.v1.socket_client.V1SocketClient._handle_json_message')
- def test_sync_iteration(self, mock_handle_json, mock_sync_websocket):
- """Test synchronous iteration over websocket messages."""
- mock_sync_websocket.__iter__ = Mock(return_value=iter([
- '{"type": "Metadata", "request_id": "test-1"}',
- b'\x00\x01\x02\x03',
- '{"type": "Results", "channel_index": [0]}'
- ]))
-
- # Mock the JSON handling to return simple objects
- mock_handle_json.side_effect = [
- {"type": "Metadata", "request_id": "test-1"},
- {"type": "Results", "channel_index": [0]}
- ]
-
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- messages = list(client)
- assert len(messages) == 3
- assert messages[0]["type"] == "Metadata"
- assert messages[1] == b'\x00\x01\x02\x03'
- assert messages[2]["type"] == "Results"
-
- @patch('deepgram.listen.v1.socket_client.AsyncV1SocketClient._handle_json_message')
- @pytest.mark.asyncio
- async def test_async_iteration(self, mock_handle_json, mock_async_websocket):
- """Test asynchronous iteration over websocket messages."""
- async def mock_aiter():
- yield '{"type": "Metadata", "request_id": "test-1"}'
- yield b'\x00\x01\x02\x03'
- yield '{"type": "Results", "channel_index": [0]}'
-
- mock_async_websocket.__aiter__ = Mock(return_value=mock_aiter())
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.side_effect = [
- {"type": "Metadata", "request_id": "test-1"},
- {"type": "Results", "channel_index": [0]}
- ]
-
- client = ListenAsyncV1SocketClient(websocket=mock_async_websocket)
-
- messages = []
- async for message in client:
- messages.append(message)
-
- assert len(messages) == 3
- assert messages[0]["type"] == "Metadata"
- assert messages[1] == b'\x00\x01\x02\x03'
- assert messages[2]["type"] == "Results"
-
- def test_sync_recv_binary(self, mock_sync_websocket, sample_audio_data):
- """Test synchronous receive of binary data."""
- mock_sync_websocket.recv.return_value = sample_audio_data
-
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
- result = client.recv()
-
- assert result == sample_audio_data
- mock_sync_websocket.recv.assert_called_once()
-
- def test_sync_recv_json(self, mock_sync_websocket):
- """Test synchronous receive of JSON data."""
- json_message = '{"type": "Metadata", "request_id": "test-123", "sha256": "abc123", "created": "2023-01-01T00:00:00Z", "duration": 5.0, "channels": 1}'
- mock_sync_websocket.recv.return_value = json_message
-
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
- result = client.recv()
-
- assert result.type == "Metadata"
- assert result.request_id == "test-123"
- mock_sync_websocket.recv.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_async_recv_binary(self, mock_async_websocket, sample_audio_data):
- """Test asynchronous receive of binary data."""
- mock_async_websocket.recv.return_value = sample_audio_data
-
- client = ListenAsyncV1SocketClient(websocket=mock_async_websocket)
- result = await client.recv()
-
- assert result == sample_audio_data
- mock_async_websocket.recv.assert_called_once()
-
- def test_sync_send_binary(self, mock_sync_websocket, sample_audio_data):
- """Test synchronous sending of binary data."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
- client.send_media(sample_audio_data)
-
- mock_sync_websocket.send.assert_called_once_with(sample_audio_data)
-
- def test_sync_send_dict(self, mock_sync_websocket):
- """Test synchronous sending of dictionary data."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
- message_dict = {"type": "Metadata", "request_id": "test-123"}
-
- control_message = ListenV1ControlMessage(type="KeepAlive")
- client.send_control(control_message)
-
- mock_sync_websocket.send.assert_called_once()
- # Verify JSON was sent
- call_args = mock_sync_websocket.send.call_args[0]
- sent_data = call_args[0]
- assert isinstance(sent_data, str)
- parsed = json.loads(sent_data)
- assert parsed["type"] == "KeepAlive"
-
- def test_sync_send_string(self, mock_sync_websocket):
- """Test synchronous sending of string data."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
- message_str = '{"type": "KeepAlive"}'
-
- # For string data, we'll use the private _send method for testing
- client._send(message_str)
-
- mock_sync_websocket.send.assert_called_once_with(message_str)
-
- def test_sync_send_pydantic_model(self, mock_sync_websocket):
- """Test synchronous sending of Pydantic model."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- control_message = ListenV1ControlMessage(type="KeepAlive")
- client.send_control(control_message)
-
- mock_sync_websocket.send.assert_called_once()
-
- def test_sync_send_control(self, mock_sync_websocket):
- """Test synchronous control message sending."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- # Mock control message
- mock_control_msg = Mock(spec=ListenV1ControlMessage)
- mock_control_msg.dict.return_value = {"type": "KeepAlive"}
-
- client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_sync_websocket.send.assert_called_once()
-
- def test_sync_send_media(self, mock_sync_websocket, sample_audio_data):
- """Test synchronous media message sending."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- client.send_media(sample_audio_data)
-
- mock_sync_websocket.send.assert_called_once_with(sample_audio_data)
-
- @patch('deepgram.listen.v1.socket_client.V1SocketClient._handle_json_message')
- def test_sync_start_listening_with_event_handler(self, mock_handle_json, mock_sync_websocket):
- """Test synchronous start_listening with event handler."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- # Mock websocket iteration
- mock_sync_websocket.__iter__ = Mock(return_value=iter([
- '{"type": "Metadata", "request_id": "test-123"}',
- '{"type": "Results", "channel_index": [0], "is_final": true}'
- ]))
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.side_effect = [
- {"type": "Metadata", "request_id": "test-123"},
- {"type": "Results", "channel_index": [0], "is_final": True}
- ]
-
- # Mock event handler
- event_handler = Mock()
- client.on(EventType.OPEN, event_handler)
- client.on(EventType.MESSAGE, event_handler)
- client.on(EventType.CLOSE, event_handler)
-
- # Start listening (this will iterate through the mock messages)
- client.start_listening()
-
- # Verify event handler was called
- assert event_handler.call_count >= 1
-
- def test_sync_start_listening_with_error(self, mock_sync_websocket):
- """Test synchronous start_listening with error."""
- client = ListenV1SocketClient(websocket=mock_sync_websocket)
-
- # Mock websocket to raise a websocket exception
- from websockets.exceptions import WebSocketException
- mock_sync_websocket.__iter__ = Mock(side_effect=WebSocketException("Connection error"))
-
- # Mock error handler
- error_handler = Mock()
- client.on(EventType.ERROR, error_handler)
-
- # Start listening (this should trigger error)
- client.start_listening()
-
- # Verify error handler was called
- error_handler.assert_called()
-
-
-class TestListenV2SocketClient:
- """Test cases for Listen V2 Socket Client."""
-
- def test_v2_sync_socket_client_initialization(self):
- """Test V2 synchronous socket client initialization."""
- mock_ws = Mock()
- client = ListenV2SocketClient(websocket=mock_ws)
-
- assert client is not None
- assert client._websocket is mock_ws
-
- def test_v2_async_socket_client_initialization(self):
- """Test V2 asynchronous socket client initialization."""
- mock_ws = AsyncMock()
- client = ListenAsyncV2SocketClient(websocket=mock_ws)
-
- assert client is not None
- assert client._websocket is mock_ws
-
- def test_v2_sync_send_control(self):
- """Test V2 synchronous control message sending."""
- mock_ws = Mock()
- client = ListenV2SocketClient(websocket=mock_ws)
-
- # Mock control message
- mock_control_msg = Mock(spec=ListenV2ControlMessage)
- mock_control_msg.dict.return_value = {"type": "KeepAlive"}
-
- client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_v2_async_send_control(self):
- """Test V2 asynchronous control message sending."""
- mock_ws = AsyncMock()
- client = ListenAsyncV2SocketClient(websocket=mock_ws)
-
- # Mock control message
- mock_control_msg = Mock(spec=ListenV2ControlMessage)
- mock_control_msg.dict.return_value = {"type": "KeepAlive"}
-
- await client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
-
-class TestListenMediaClient:
- """Test cases for Listen Media Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock()
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock()
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def mock_listen_response(self):
- """Mock listen response data."""
- mock_response = Mock(spec=ListenV1Response)
- mock_response.metadata = Mock()
- mock_response.results = Mock()
- return mock_response
-
- def test_media_client_initialization(self, sync_client_wrapper):
- """Test MediaClient initialization."""
- client = MediaClient(client_wrapper=sync_client_wrapper)
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_media_client_initialization(self, async_client_wrapper):
- """Test AsyncMediaClient initialization."""
- client = AsyncMediaClient(client_wrapper=async_client_wrapper)
- assert client is not None
- assert client._raw_client is not None
-
- def test_media_client_raw_response_access(self, sync_client_wrapper):
- """Test MediaClient raw response access."""
- client = MediaClient(client_wrapper=sync_client_wrapper)
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_media_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncMediaClient raw response access."""
- client = AsyncMediaClient(client_wrapper=async_client_wrapper)
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_url')
- def test_media_client_transcribe_url(self, mock_transcribe, sync_client_wrapper, mock_listen_response):
- """Test MediaClient transcribe_url method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_listen_response
- mock_transcribe.return_value = mock_response
-
- client = MediaClient(client_wrapper=sync_client_wrapper)
-
- result = client.transcribe_url(
- url="https://example.com/audio.mp3",
- model="nova-2-general"
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
- assert result.metadata is not None
- assert result.results is not None
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
- @patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_url')
- def test_media_client_transcribe_url_with_all_features(self, mock_transcribe, sync_client_wrapper, mock_listen_response):
- """Test MediaClient transcribe_url with all features enabled."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_listen_response
- mock_transcribe.return_value = mock_response
-
- client = MediaClient(client_wrapper=sync_client_wrapper)
-
- result = client.transcribe_url(
- url="https://example.com/audio.mp3",
- model="nova-2-general",
- language="en-US",
- encoding="linear16",
- smart_format=True,
- punctuate=True,
- diarize=True,
- summarize="v2",
- sentiment=True,
- topics=True,
- intents=True,
- custom_topic_mode="extend",
- custom_intent_mode="extend"
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
-
- # Verify the call was made with all parameters
- mock_transcribe.assert_called_once()
- call_args = mock_transcribe.call_args
- assert "model" in call_args[1]
- assert "smart_format" in call_args[1]
-
- @patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_file')
- def test_media_client_transcribe_file(self, mock_transcribe, sync_client_wrapper, mock_listen_response, sample_audio_data):
- """Test MediaClient transcribe_file method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_listen_response
- mock_transcribe.return_value = mock_response
-
- client = MediaClient(client_wrapper=sync_client_wrapper)
-
- # Create a mock file-like object
- from io import BytesIO
- audio_file = BytesIO(sample_audio_data)
-
- result = client.transcribe_file(
- request=audio_file,
- model="nova-2-general"
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
- @patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_url')
- def test_media_client_transcribe_url_with_callback(self, mock_transcribe, sync_client_wrapper, mock_listen_response):
- """Test MediaClient transcribe_url with callback configuration."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_listen_response
- mock_transcribe.return_value = mock_response
-
- client = MediaClient(client_wrapper=sync_client_wrapper)
-
- result = client.transcribe_url(
- url="https://example.com/audio.mp3",
- model="nova-2-general",
- callback="https://example.com/callback",
- callback_method="POST"
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
- @patch('deepgram.listen.v1.media.raw_client.AsyncRawMediaClient.transcribe_url')
- @pytest.mark.asyncio
- async def test_async_media_client_transcribe_url(self, mock_transcribe, async_client_wrapper, mock_listen_response):
- """Test AsyncMediaClient transcribe_url method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_listen_response
- mock_transcribe.return_value = mock_response
-
- client = AsyncMediaClient(client_wrapper=async_client_wrapper)
-
- result = await client.transcribe_url(
- url="https://example.com/audio.mp3",
- model="nova-2-general"
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
- @patch('deepgram.listen.v1.media.raw_client.AsyncRawMediaClient.transcribe_file')
- @pytest.mark.asyncio
- async def test_async_media_client_transcribe_file(self, mock_transcribe, async_client_wrapper, mock_listen_response, sample_audio_data):
- """Test AsyncMediaClient transcribe_file method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_listen_response
- mock_transcribe.return_value = mock_response
-
- client = AsyncMediaClient(client_wrapper=async_client_wrapper)
-
- # Create a mock file-like object
- from io import BytesIO
- audio_file = BytesIO(sample_audio_data)
-
- result = await client.transcribe_file(
- request=audio_file,
- model="nova-2-general"
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
-
-class TestListenIntegrationScenarios:
- """Test Listen API integration scenarios."""
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_listen_v1_transcription_workflow(self, mock_websocket_connect, mock_api_key, sample_audio_data):
- """Test complete Listen V1 transcription workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock(side_effect=[
- '{"type": "Metadata", "request_id": "test-123", "sha256": "abc123", "created": "2023-01-01T00:00:00Z", "duration": 1.0, "channels": 1}',
- '{"type": "Metadata", "request_id": "test-456", "sha256": "def456", "created": "2023-01-01T00:00:01Z", "duration": 2.0, "channels": 1}'
- ])
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Metadata", "request_id": "test-123", "sha256": "abc123", "created": "2023-01-01T00:00:00Z", "duration": 1.0, "channels": 1}',
- '{"type": "Metadata", "request_id": "test-456", "sha256": "def456", "created": "2023-01-01T00:00:01Z", "duration": 2.0, "channels": 1}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Connect and send audio
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- # Send control message
- connection.send_control(Mock())
-
- # Send audio data
- connection.send_media(sample_audio_data)
-
- # Receive transcription results
- result = connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- @patch('deepgram.listen.v2.socket_client.V2SocketClient._handle_json_message')
- @patch('deepgram.listen.v2.raw_client.websockets_sync_client.connect')
- def test_listen_v2_transcription_workflow(self, mock_websocket_connect, mock_handle_json, mock_api_key, sample_audio_data):
- """Test complete Listen V2 transcription workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock(side_effect=[
- '{"type": "Connected", "request_id": "test-v2-123"}',
- '{"type": "TurnInfo", "request_id": "test-v2-123", "turn_id": "turn-1"}'
- ])
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Connected", "request_id": "test-v2-123"}',
- '{"type": "TurnInfo", "request_id": "test-v2-123", "turn_id": "turn-1"}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.return_value = {"type": "Connected", "request_id": "test-v2-123"}
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Connect and send audio
- with client.listen.v2.with_raw_response.connect(
- model="nova-2-general",
- encoding="linear16",
- sample_rate=16000
- ) as connection:
- # Send control message
- connection.send_control(Mock())
-
- # Send audio data
- connection.send_media(sample_audio_data)
-
- # Receive transcription results
- result = connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- @patch('deepgram.listen.v1.socket_client.V1SocketClient._handle_json_message')
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_listen_event_driven_workflow(self, mock_websocket_connect, mock_handle_json, mock_api_key):
- """Test Listen event-driven workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Metadata", "request_id": "event-test-123"}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.return_value = {"type": "Metadata", "request_id": "event-test-123"}
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Mock event handlers
- on_open = Mock()
- on_message = Mock()
- on_close = Mock()
- on_error = Mock()
-
- # Connect with event handlers
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- # Set up event handlers
- connection.on(EventType.OPEN, on_open)
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, on_close)
- connection.on(EventType.ERROR, on_error)
-
- # Start listening (this will process the mock messages)
- connection.start_listening()
-
- # Verify event handlers were set up (they may or may not be called depending on mock behavior)
- assert hasattr(connection, 'on')
-
- @patch('deepgram.listen.v1.socket_client.AsyncV1SocketClient._handle_json_message')
- @patch('deepgram.listen.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_listen_transcription_workflow(self, mock_websocket_connect, mock_handle_json, mock_api_key, sample_audio_data):
- """Test async Listen transcription workflow."""
- # Mock async websocket connection
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
- mock_ws.recv = AsyncMock(side_effect=[
- '{"type": "Metadata", "request_id": "async-test-123"}',
- '{"type": "Results", "channel_index": [0]}'
- ])
-
- async def mock_aiter():
- yield '{"type": "Metadata", "request_id": "async-test-123"}'
- yield '{"type": "Results", "channel_index": [0]}'
-
- mock_ws.__aiter__ = Mock(return_value=mock_aiter())
- mock_ws.__aenter__ = AsyncMock(return_value=mock_ws)
- mock_ws.__aexit__ = AsyncMock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.return_value = {"type": "Metadata", "request_id": "async-test-123"}
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Connect and send audio
- async with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- # Send control message
- await connection.send_control(Mock())
-
- # Send audio data
- await connection.send_media(sample_audio_data)
-
- # Receive transcription results
- result = await connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- def test_complete_listen_media_workflow_sync(self, mock_api_key):
- """Test complete Listen Media workflow using sync client."""
- with patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_url') as mock_transcribe:
- # Mock the response with Mock objects to avoid Pydantic validation
- mock_response = Mock()
- mock_response.data = Mock(spec=ListenV1Response)
- mock_response.data.metadata = Mock()
- mock_response.data.metadata.request_id = "media-sync-123"
- mock_response.data.results = Mock()
- mock_response.data.results.channels = [Mock()]
- mock_response.data.results.channels[0].alternatives = [Mock()]
- mock_response.data.results.channels[0].alternatives[0].transcript = "This is a test transcription."
- mock_transcribe.return_value = mock_response
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access nested listen media functionality
- result = client.listen.v1.media.transcribe_url(
- url="https://example.com/test-audio.mp3",
- model="nova-2-general",
- smart_format=True,
- punctuate=True
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
- assert result.metadata is not None
- assert result.results is not None
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_listen_media_workflow_async(self, mock_api_key):
- """Test complete Listen Media workflow using async client."""
- with patch('deepgram.listen.v1.media.raw_client.AsyncRawMediaClient.transcribe_url') as mock_transcribe:
- # Mock the async response with Mock objects to avoid Pydantic validation
- mock_response = Mock()
- mock_response.data = Mock(spec=ListenV1Response)
- mock_response.data.metadata = Mock()
- mock_response.data.metadata.request_id = "media-async-456"
- mock_response.data.results = Mock()
- mock_response.data.results.channels = [Mock()]
- mock_response.data.results.channels[0].alternatives = [Mock()]
- mock_response.data.results.channels[0].alternatives[0].transcript = "This is an async test transcription."
- mock_transcribe.return_value = mock_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access nested listen media functionality
- result = await client.listen.v1.media.transcribe_url(
- url="https://example.com/test-audio-async.mp3",
- model="nova-2-general",
- topics=True,
- sentiment=True
- )
-
- assert result is not None
- assert isinstance(result, ListenV1Response)
- assert result.metadata is not None
- assert result.results is not None
-
- # Verify the call was made
- mock_transcribe.assert_called_once()
-
-
-class TestListenErrorHandling:
- """Test Listen client error handling."""
-
- @patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_url')
- def test_media_client_api_error_handling(self, mock_transcribe, mock_api_key):
- """Test MediaClient API error handling."""
- # Mock an API error
- mock_transcribe.side_effect = ApiError(
- status_code=400,
- headers={},
- body="Invalid request parameters"
- )
-
- client = DeepgramClient(api_key=mock_api_key).listen.v1.media
-
- with pytest.raises(ApiError) as exc_info:
- client.transcribe_url(url="https://example.com/audio.mp3")
-
- assert exc_info.value.status_code == 400
- assert "Invalid request parameters" in str(exc_info.value.body)
-
- @patch('deepgram.listen.v1.media.raw_client.RawMediaClient.transcribe_url')
- def test_media_client_network_error_handling(self, mock_transcribe, mock_api_key):
- """Test MediaClient network error handling."""
- # Mock a network error
- mock_transcribe.side_effect = httpx.ConnectError("Connection failed")
-
- client = DeepgramClient(api_key=mock_api_key).listen.v1.media
-
- with pytest.raises(httpx.ConnectError):
- client.transcribe_url(url="https://example.com/audio.mp3")
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_websocket_connection_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test WebSocket connection error handling."""
- mock_websocket_connect.side_effect = websockets.exceptions.ConnectionClosedError(None, None)
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(websockets.exceptions.ConnectionClosedError):
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- pass
-
- @patch('deepgram.listen.v1.raw_client.websockets_sync_client.connect')
- def test_generic_websocket_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test generic WebSocket error handling."""
- mock_websocket_connect.side_effect = Exception("Generic WebSocket error")
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(Exception) as exc_info:
- with client.listen.v1.with_raw_response.connect(model="nova-2-general") as connection:
- pass
-
- assert "Generic WebSocket error" in str(exc_info.value)
-
-
-class TestListenSocketClientErrorScenarios:
- """Test Listen socket client error scenarios."""
-
- def test_json_decode_error_handling(self, mock_websocket):
- """Test JSON decode error handling."""
- mock_websocket.recv.return_value = '{"invalid": json}'
-
- client = ListenV1SocketClient(websocket=mock_websocket)
-
- # Should raise JSONDecodeError for invalid JSON
- with pytest.raises(json.JSONDecodeError):
- client.recv()
-
- def test_connection_closed_ok_no_error_emission(self, mock_websocket):
- """Test that normal connection closure doesn't emit error."""
- mock_websocket.__iter__ = Mock(side_effect=websockets.exceptions.ConnectionClosedOK(None, None))
-
- client = ListenV1SocketClient(websocket=mock_websocket)
-
- # Mock error handler
- error_handler = Mock()
- client.on(EventType.ERROR, error_handler)
-
- # Start listening (should handle ConnectionClosedOK gracefully)
- client.start_listening()
-
- # Error handler should not be called for normal closure
- error_handler.assert_not_called()
-
- @pytest.mark.asyncio
- async def test_async_connection_closed_ok_no_error_emission(self, mock_async_websocket):
- """Test that async normal connection closure doesn't emit error."""
- async def mock_aiter():
- raise websockets.exceptions.ConnectionClosedOK(None, None)
- yield # This will never be reached, but makes it a generator
-
- mock_async_websocket.__aiter__ = Mock(return_value=mock_aiter())
-
- client = ListenAsyncV1SocketClient(websocket=mock_async_websocket)
-
- # Mock error handler
- error_handler = Mock()
- client.on(EventType.ERROR, error_handler)
-
- # Start listening (should handle ConnectionClosedOK gracefully)
- await client.start_listening()
-
- # Error handler should not be called for normal closure
- error_handler.assert_not_called()
diff --git a/tests/integrations/test_manage_client.py b/tests/integrations/test_manage_client.py
deleted file mode 100644
index 886ce89d..00000000
--- a/tests/integrations/test_manage_client.py
+++ /dev/null
@@ -1,823 +0,0 @@
-"""Integration tests for Manage client implementations."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch
-import httpx
-import json
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.environment import DeepgramClientEnvironment
-
-from deepgram.manage.client import ManageClient, AsyncManageClient
-from deepgram.manage.v1.client import V1Client as ManageV1Client, AsyncV1Client as ManageAsyncV1Client
-from deepgram.manage.v1.projects.client import ProjectsClient, AsyncProjectsClient
-from deepgram.manage.v1.models.client import ModelsClient, AsyncModelsClient
-
-# Import response types for mocking
-from deepgram.types.list_projects_v1response import ListProjectsV1Response
-from deepgram.types.get_project_v1response import GetProjectV1Response
-from deepgram.types.list_models_v1response import ListModelsV1Response
-from deepgram.types.get_model_v1response import GetModelV1Response
-from deepgram.types.get_model_v1response_batch import GetModelV1ResponseBatch
-from deepgram.types.get_model_v1response_metadata import GetModelV1ResponseMetadata
-
-
-class TestManageClient:
- """Test cases for Manage Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_manage_client_initialization(self, sync_client_wrapper):
- """Test ManageClient initialization."""
- client = ManageClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_async_manage_client_initialization(self, async_client_wrapper):
- """Test AsyncManageClient initialization."""
- client = AsyncManageClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_manage_client_v1_property_lazy_loading(self, sync_client_wrapper):
- """Test ManageClient v1 property lazy loading."""
- client = ManageClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, ManageV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_async_manage_client_v1_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncManageClient v1 property lazy loading."""
- client = AsyncManageClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, ManageAsyncV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_manage_client_raw_response_access(self, sync_client_wrapper):
- """Test ManageClient raw response access."""
- client = ManageClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_manage_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncManageClient raw response access."""
- client = AsyncManageClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_manage_client_integration_with_main_client(self, mock_api_key):
- """Test ManageClient integration with main DeepgramClient."""
- client = DeepgramClient(api_key=mock_api_key)
-
- manage_client = client.manage
- assert manage_client is not None
- assert isinstance(manage_client, ManageClient)
-
- def test_async_manage_client_integration_with_main_client(self, mock_api_key):
- """Test AsyncManageClient integration with main AsyncDeepgramClient."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- manage_client = client.manage
- assert manage_client is not None
- assert isinstance(manage_client, AsyncManageClient)
-
-
-class TestManageV1Client:
- """Test cases for Manage V1 Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_manage_v1_client_initialization(self, sync_client_wrapper):
- """Test ManageV1Client initialization."""
- client = ManageV1Client(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._projects is None # Lazy loaded
- assert client._models is None # Lazy loaded
-
- def test_async_manage_v1_client_initialization(self, async_client_wrapper):
- """Test AsyncManageV1Client initialization."""
- client = ManageAsyncV1Client(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._projects is None # Lazy loaded
- assert client._models is None # Lazy loaded
-
- def test_manage_v1_client_projects_property_lazy_loading(self, sync_client_wrapper):
- """Test ManageV1Client projects property lazy loading."""
- client = ManageV1Client(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._projects is None
-
- # Access triggers lazy loading
- projects_client = client.projects
- assert client._projects is not None
- assert isinstance(projects_client, ProjectsClient)
-
- # Subsequent access returns same instance
- assert client.projects is projects_client
-
- def test_async_manage_v1_client_projects_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncManageV1Client projects property lazy loading."""
- client = ManageAsyncV1Client(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._projects is None
-
- # Access triggers lazy loading
- projects_client = client.projects
- assert client._projects is not None
- assert isinstance(projects_client, AsyncProjectsClient)
-
- # Subsequent access returns same instance
- assert client.projects is projects_client
-
- def test_manage_v1_client_models_property_lazy_loading(self, sync_client_wrapper):
- """Test ManageV1Client models property lazy loading."""
- client = ManageV1Client(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._models is None
-
- # Access triggers lazy loading
- models_client = client.models
- assert client._models is not None
- assert isinstance(models_client, ModelsClient)
-
- # Subsequent access returns same instance
- assert client.models is models_client
-
- def test_async_manage_v1_client_models_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncManageV1Client models property lazy loading."""
- client = ManageAsyncV1Client(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._models is None
-
- # Access triggers lazy loading
- models_client = client.models
- assert client._models is not None
- assert isinstance(models_client, AsyncModelsClient)
-
- # Subsequent access returns same instance
- assert client.models is models_client
-
-
-class TestProjectsClient:
- """Test cases for Projects Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def mock_projects_list_response(self):
- """Mock projects list response data."""
- return {
- "projects": [
- {
- "project_id": "project-123",
- "name": "Test Project 1",
- "company": "Test Company"
- },
- {
- "project_id": "project-456",
- "name": "Test Project 2",
- "company": "Test Company"
- }
- ]
- }
-
- @pytest.fixture
- def mock_project_get_response(self):
- """Mock project get response data."""
- return {
- "project_id": "project-123",
- "name": "Test Project",
- "company": "Test Company"
- }
-
- def test_projects_client_initialization(self, sync_client_wrapper):
- """Test ProjectsClient initialization."""
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_projects_client_initialization(self, async_client_wrapper):
- """Test AsyncProjectsClient initialization."""
- client = AsyncProjectsClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_projects_client_raw_response_access(self, sync_client_wrapper):
- """Test ProjectsClient raw response access."""
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_projects_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncProjectsClient raw response access."""
- client = AsyncProjectsClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.manage.v1.projects.raw_client.RawProjectsClient.list')
- def test_projects_client_list(self, mock_list, sync_client_wrapper, mock_projects_list_response):
- """Test ProjectsClient list method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = ListProjectsV1Response(**mock_projects_list_response)
- mock_list.return_value = mock_response
-
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- result = client.list()
-
- assert result is not None
- assert isinstance(result, ListProjectsV1Response)
- assert len(result.projects) == 2
- assert result.projects[0].project_id == "project-123"
-
- # Verify raw client was called with correct parameters
- mock_list.assert_called_once_with(request_options=None)
-
- @patch('deepgram.manage.v1.projects.raw_client.RawProjectsClient.get')
- def test_projects_client_get(self, mock_get, sync_client_wrapper, mock_project_get_response):
- """Test ProjectsClient get method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = GetProjectV1Response(**mock_project_get_response)
- mock_get.return_value = mock_response
-
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- project_id = "project-123"
- result = client.get(project_id)
-
- assert result is not None
- assert isinstance(result, GetProjectV1Response)
- assert result.project_id == project_id
-
- # Verify raw client was called with correct parameters
- mock_get.assert_called_once_with(
- project_id,
- limit=None,
- page=None,
- request_options=None
- )
-
- @patch('deepgram.manage.v1.projects.raw_client.RawProjectsClient.list')
- def test_projects_client_list_with_request_options(self, mock_list, sync_client_wrapper, mock_projects_list_response):
- """Test ProjectsClient list with request options."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = ListProjectsV1Response(**mock_projects_list_response)
- mock_list.return_value = mock_response
-
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- request_options = RequestOptions(
- additional_headers={"X-Custom-Header": "test-value"}
- )
- result = client.list(request_options=request_options)
-
- assert result is not None
- assert isinstance(result, ListProjectsV1Response)
-
- # Verify raw client was called with request options
- mock_list.assert_called_once_with(request_options=request_options)
-
- @patch('deepgram.manage.v1.projects.raw_client.AsyncRawProjectsClient.list')
- @pytest.mark.asyncio
- async def test_async_projects_client_list(self, mock_list, async_client_wrapper, mock_projects_list_response):
- """Test AsyncProjectsClient list method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = ListProjectsV1Response(**mock_projects_list_response)
- mock_list.return_value = mock_response
-
- client = AsyncProjectsClient(client_wrapper=async_client_wrapper)
-
- result = await client.list()
-
- assert result is not None
- assert isinstance(result, ListProjectsV1Response)
- assert len(result.projects) == 2
- assert result.projects[0].project_id == "project-123"
-
- # Verify async raw client was called with correct parameters
- mock_list.assert_called_once_with(request_options=None)
-
- @patch('deepgram.manage.v1.projects.raw_client.AsyncRawProjectsClient.get')
- @pytest.mark.asyncio
- async def test_async_projects_client_get(self, mock_get, async_client_wrapper, mock_project_get_response):
- """Test AsyncProjectsClient get method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = GetProjectV1Response(**mock_project_get_response)
- mock_get.return_value = mock_response
-
- client = AsyncProjectsClient(client_wrapper=async_client_wrapper)
-
- project_id = "project-456"
- result = await client.get(project_id, limit=10, page=1)
-
- assert result is not None
- assert isinstance(result, GetProjectV1Response)
- assert result.project_id == "project-123" # From mock response
-
- # Verify async raw client was called with correct parameters
- mock_get.assert_called_once_with(
- project_id,
- limit=10,
- page=1,
- request_options=None
- )
-
-
-class TestModelsClient:
- """Test cases for Models Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def mock_models_list_response(self):
- """Mock models list response data."""
- return {
- "models": [
- {
- "model_id": "nova-2-general",
- "name": "Nova 2 General",
- "canonical_name": "nova-2-general",
- "architecture": "nova-2",
- "language": "en",
- "version": "2024-01-09",
- "uuid": "uuid-123",
- "batch": False,
- "streaming": True
- },
- {
- "model_id": "nova-2-medical",
- "name": "Nova 2 Medical",
- "canonical_name": "nova-2-medical",
- "architecture": "nova-2",
- "language": "en",
- "version": "2024-01-09",
- "uuid": "uuid-456",
- "batch": True,
- "streaming": True
- }
- ]
- }
-
- @pytest.fixture
- def mock_model_get_response(self):
- """Mock model get response data."""
- return {
- "model_id": "nova-2-general",
- "name": "Nova 2 General",
- "canonical_name": "nova-2-general",
- "architecture": "nova-2",
- "language": "en",
- "version": "2024-01-09",
- "uuid": "uuid-123",
- "batch": False,
- "streaming": True
- }
-
- def test_models_client_initialization(self, sync_client_wrapper):
- """Test ModelsClient initialization."""
- client = ModelsClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_models_client_initialization(self, async_client_wrapper):
- """Test AsyncModelsClient initialization."""
- client = AsyncModelsClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_models_client_raw_response_access(self, sync_client_wrapper):
- """Test ModelsClient raw response access."""
- client = ModelsClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_models_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncModelsClient raw response access."""
- client = AsyncModelsClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.manage.v1.models.raw_client.RawModelsClient.list')
- def test_models_client_list(self, mock_list, sync_client_wrapper, mock_models_list_response):
- """Test ModelsClient list method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = ListModelsV1Response(**mock_models_list_response)
- mock_list.return_value = mock_response
-
- client = ModelsClient(client_wrapper=sync_client_wrapper)
-
- result = client.list()
-
- assert result is not None
- assert isinstance(result, ListModelsV1Response)
- assert len(result.models) == 2
- assert result.models[0]["model_id"] == "nova-2-general"
-
- # Verify raw client was called with correct parameters
- mock_list.assert_called_once_with(include_outdated=None, request_options=None)
-
- @patch('deepgram.manage.v1.models.raw_client.RawModelsClient.list')
- def test_models_client_list_include_outdated(self, mock_list, sync_client_wrapper, mock_models_list_response):
- """Test ModelsClient list with include_outdated parameter."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = ListModelsV1Response(**mock_models_list_response)
- mock_list.return_value = mock_response
-
- client = ModelsClient(client_wrapper=sync_client_wrapper)
-
- result = client.list(include_outdated=True)
-
- assert result is not None
- assert isinstance(result, ListModelsV1Response)
-
- # Verify raw client was called with include_outdated parameter
- mock_list.assert_called_once_with(include_outdated=True, request_options=None)
-
- @patch('deepgram.manage.v1.models.raw_client.RawModelsClient.get')
- def test_models_client_get(self, mock_get, sync_client_wrapper, mock_model_get_response):
- """Test ModelsClient get method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = Mock(spec=GetModelV1ResponseBatch)
- # Set attributes from mock data
- for key, value in mock_model_get_response.items():
- setattr(mock_response.data, key, value)
- mock_get.return_value = mock_response
-
- client = ModelsClient(client_wrapper=sync_client_wrapper)
-
- model_id = "nova-2-general"
- result = client.get(model_id)
-
- assert result is not None
- assert isinstance(result, (GetModelV1ResponseBatch, GetModelV1ResponseMetadata))
- assert result.model_id == model_id
-
- # Verify raw client was called with correct parameters
- mock_get.assert_called_once_with(model_id, request_options=None)
-
- @patch('deepgram.manage.v1.models.raw_client.AsyncRawModelsClient.list')
- @pytest.mark.asyncio
- async def test_async_models_client_list(self, mock_list, async_client_wrapper, mock_models_list_response):
- """Test AsyncModelsClient list method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = ListModelsV1Response(**mock_models_list_response)
- mock_list.return_value = mock_response
-
- client = AsyncModelsClient(client_wrapper=async_client_wrapper)
-
- result = await client.list(include_outdated=False)
-
- assert result is not None
- assert isinstance(result, ListModelsV1Response)
- assert len(result.models) == 2
- assert result.models[1]["model_id"] == "nova-2-medical"
-
- # Verify async raw client was called with correct parameters
- mock_list.assert_called_once_with(include_outdated=False, request_options=None)
-
- @patch('deepgram.manage.v1.models.raw_client.AsyncRawModelsClient.get')
- @pytest.mark.asyncio
- async def test_async_models_client_get(self, mock_get, async_client_wrapper, mock_model_get_response):
- """Test AsyncModelsClient get method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = Mock(spec=GetModelV1ResponseBatch)
- # Set attributes from mock data
- for key, value in mock_model_get_response.items():
- setattr(mock_response.data, key, value)
- mock_get.return_value = mock_response
-
- client = AsyncModelsClient(client_wrapper=async_client_wrapper)
-
- model_id = "nova-2-medical"
- result = await client.get(model_id)
-
- assert result is not None
- assert isinstance(result, (GetModelV1ResponseBatch, GetModelV1ResponseMetadata))
- assert result.model_id == "nova-2-general" # From mock response
-
- # Verify async raw client was called with correct parameters
- mock_get.assert_called_once_with(model_id, request_options=None)
-
-
-class TestManageIntegrationScenarios:
- """Test Manage integration scenarios."""
-
- def test_complete_manage_workflow_sync(self, mock_api_key):
- """Test complete Manage workflow using sync client."""
- with patch('deepgram.manage.v1.projects.raw_client.RawProjectsClient.list') as mock_list:
- # Mock the response
- mock_response = Mock()
- mock_response.data = Mock(spec=ListProjectsV1Response)
- mock_project = Mock()
- mock_project.project_id = "project-123"
- mock_project.name = "Test Project"
- mock_project.company = "Test Company"
- mock_response.data.projects = [mock_project]
- mock_list.return_value = mock_response
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access nested manage functionality
- result = client.manage.v1.projects.list()
-
- assert result is not None
- assert isinstance(result, ListProjectsV1Response)
- assert len(result.projects) == 1
-
- # Verify the call was made
- mock_list.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_manage_workflow_async(self, mock_api_key):
- """Test complete Manage workflow using async client."""
- with patch('deepgram.manage.v1.models.raw_client.AsyncRawModelsClient.list') as mock_list:
- # Mock the async response
- mock_response = Mock()
- mock_response.data = ListModelsV1Response(
- models=[
- Mock(model_id="nova-2-general", name="Nova 2 General")
- ]
- )
- mock_list.return_value = mock_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access nested manage functionality
- result = await client.manage.v1.models.list()
-
- assert result is not None
- assert isinstance(result, ListModelsV1Response)
- assert len(result.models) == 1
-
- # Verify the call was made
- mock_list.assert_called_once()
-
- def test_manage_client_property_isolation(self, mock_api_key):
- """Test that manage clients are properly isolated between instances."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- manage1 = client1.manage
- manage2 = client2.manage
-
- # Verify they are different instances
- assert manage1 is not manage2
- assert manage1._client_wrapper is not manage2._client_wrapper
-
- # Verify nested clients are also different
- projects1 = manage1.v1.projects
- projects2 = manage2.v1.projects
-
- assert projects1 is not projects2
-
- def test_manage_nested_client_access(self, mock_api_key):
- """Test accessing deeply nested manage clients."""
- client = DeepgramClient(api_key=mock_api_key)
-
- # Test access to v1 clients
- manage_v1_projects = client.manage.v1.projects
- manage_v1_models = client.manage.v1.models
-
- # Verify all are properly initialized
- assert manage_v1_projects is not None
- assert manage_v1_models is not None
-
- # Verify they are different client types
- assert type(manage_v1_projects).__name__ == 'ProjectsClient'
- assert type(manage_v1_models).__name__ == 'ModelsClient'
-
-
-class TestManageErrorHandling:
- """Test Manage client error handling."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @patch('deepgram.manage.v1.projects.raw_client.RawProjectsClient.list')
- def test_projects_client_api_error_handling(self, mock_list, sync_client_wrapper):
- """Test ProjectsClient API error handling."""
- # Mock an API error
- mock_list.side_effect = ApiError(
- status_code=403,
- headers={},
- body="Insufficient permissions"
- )
-
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- client.list()
-
- assert exc_info.value.status_code == 403
- assert "Insufficient permissions" in str(exc_info.value.body)
-
- @patch('deepgram.manage.v1.models.raw_client.AsyncRawModelsClient.get')
- @pytest.mark.asyncio
- async def test_async_models_client_api_error_handling(self, mock_get, async_client_wrapper):
- """Test AsyncModelsClient API error handling."""
- # Mock an API error
- mock_get.side_effect = ApiError(
- status_code=404,
- headers={},
- body="Model not found"
- )
-
- client = AsyncModelsClient(client_wrapper=async_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- await client.get("non-existent-model")
-
- assert exc_info.value.status_code == 404
- assert "Model not found" in str(exc_info.value.body)
-
- @patch('deepgram.manage.v1.projects.raw_client.RawProjectsClient.get')
- def test_projects_client_network_error_handling(self, mock_get, sync_client_wrapper):
- """Test ProjectsClient network error handling."""
- # Mock a network error
- mock_get.side_effect = httpx.ConnectError("Connection failed")
-
- client = ProjectsClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(httpx.ConnectError):
- client.get("project-123")
-
- def test_client_wrapper_integration(self, sync_client_wrapper):
- """Test integration with client wrapper."""
- client = ManageClient(client_wrapper=sync_client_wrapper)
-
- # Test that client wrapper methods are accessible
- assert hasattr(client._client_wrapper, 'get_environment')
- assert hasattr(client._client_wrapper, 'get_headers')
- assert hasattr(client._client_wrapper, 'api_key')
-
- environment = client._client_wrapper.get_environment()
- headers = client._client_wrapper.get_headers()
- api_key = client._client_wrapper.api_key
-
- assert environment is not None
- assert isinstance(headers, dict)
- assert api_key is not None
diff --git a/tests/integrations/test_read_client.py b/tests/integrations/test_read_client.py
deleted file mode 100644
index 1bd07b57..00000000
--- a/tests/integrations/test_read_client.py
+++ /dev/null
@@ -1,772 +0,0 @@
-"""Integration tests for Read client implementations."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch
-import httpx
-import json
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.environment import DeepgramClientEnvironment
-
-from deepgram.read.client import ReadClient, AsyncReadClient
-from deepgram.read.v1.client import V1Client as ReadV1Client, AsyncV1Client as ReadAsyncV1Client
-from deepgram.read.v1.text.client import TextClient, AsyncTextClient
-
-# Import request and response types for mocking
-from deepgram.requests.read_v1request_text import ReadV1RequestTextParams
-from deepgram.requests.read_v1request_url import ReadV1RequestUrlParams
-from deepgram.types.read_v1response import ReadV1Response
-from deepgram.read.v1.text.types.text_analyze_request_callback_method import TextAnalyzeRequestCallbackMethod
-from deepgram.read.v1.text.types.text_analyze_request_summarize import TextAnalyzeRequestSummarize
-from deepgram.read.v1.text.types.text_analyze_request_custom_topic_mode import TextAnalyzeRequestCustomTopicMode
-from deepgram.read.v1.text.types.text_analyze_request_custom_intent_mode import TextAnalyzeRequestCustomIntentMode
-
-
-class TestReadClient:
- """Test cases for Read Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_read_client_initialization(self, sync_client_wrapper):
- """Test ReadClient initialization."""
- client = ReadClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_async_read_client_initialization(self, async_client_wrapper):
- """Test AsyncReadClient initialization."""
- client = AsyncReadClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_read_client_v1_property_lazy_loading(self, sync_client_wrapper):
- """Test ReadClient v1 property lazy loading."""
- client = ReadClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, ReadV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_async_read_client_v1_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncReadClient v1 property lazy loading."""
- client = AsyncReadClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, ReadAsyncV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_read_client_raw_response_access(self, sync_client_wrapper):
- """Test ReadClient raw response access."""
- client = ReadClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_read_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncReadClient raw response access."""
- client = AsyncReadClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_read_client_integration_with_main_client(self, mock_api_key):
- """Test ReadClient integration with main DeepgramClient."""
- client = DeepgramClient(api_key=mock_api_key)
-
- read_client = client.read
- assert read_client is not None
- assert isinstance(read_client, ReadClient)
-
- def test_async_read_client_integration_with_main_client(self, mock_api_key):
- """Test AsyncReadClient integration with main AsyncDeepgramClient."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- read_client = client.read
- assert read_client is not None
- assert isinstance(read_client, AsyncReadClient)
-
-
-class TestReadV1Client:
- """Test cases for Read V1 Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_read_v1_client_initialization(self, sync_client_wrapper):
- """Test ReadV1Client initialization."""
- client = ReadV1Client(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._text is None # Lazy loaded
-
- def test_async_read_v1_client_initialization(self, async_client_wrapper):
- """Test AsyncReadV1Client initialization."""
- client = ReadAsyncV1Client(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._text is None # Lazy loaded
-
- def test_read_v1_client_text_property_lazy_loading(self, sync_client_wrapper):
- """Test ReadV1Client text property lazy loading."""
- client = ReadV1Client(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._text is None
-
- # Access triggers lazy loading
- text_client = client.text
- assert client._text is not None
- assert isinstance(text_client, TextClient)
-
- # Subsequent access returns same instance
- assert client.text is text_client
-
- def test_async_read_v1_client_text_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncReadV1Client text property lazy loading."""
- client = ReadAsyncV1Client(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._text is None
-
- # Access triggers lazy loading
- text_client = client.text
- assert client._text is not None
- assert isinstance(text_client, AsyncTextClient)
-
- # Subsequent access returns same instance
- assert client.text is text_client
-
- def test_read_v1_client_raw_response_access(self, sync_client_wrapper):
- """Test ReadV1Client raw response access."""
- client = ReadV1Client(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_read_v1_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncReadV1Client raw response access."""
- client = ReadAsyncV1Client(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
-
-class TestTextClient:
- """Test cases for Text Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def mock_text_request_url(self):
- """Mock text analysis request with URL."""
- return ReadV1RequestUrlParams(url="https://example.com/article.html")
-
- @pytest.fixture
- def mock_text_request_text(self):
- """Mock text analysis request with direct text."""
- return ReadV1RequestTextParams(
- text="This is a sample text for analysis. It contains positive sentiment and discusses technology topics."
- )
-
- @pytest.fixture
- def mock_text_analysis_response(self):
- """Mock text analysis response data."""
- from deepgram.types.read_v1response_metadata import ReadV1ResponseMetadata
- from deepgram.types.read_v1response_results import ReadV1ResponseResults
-
- return ReadV1Response(
- metadata=ReadV1ResponseMetadata(),
- results=ReadV1ResponseResults()
- )
-
- def test_text_client_initialization(self, sync_client_wrapper):
- """Test TextClient initialization."""
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_text_client_initialization(self, async_client_wrapper):
- """Test AsyncTextClient initialization."""
- client = AsyncTextClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_text_client_raw_response_access(self, sync_client_wrapper):
- """Test TextClient raw response access."""
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_text_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncTextClient raw response access."""
- client = AsyncTextClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze')
- def test_text_client_analyze_url(self, mock_analyze, sync_client_wrapper, mock_text_request_url, mock_text_analysis_response):
- """Test TextClient analyze method with URL."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_text_analysis_response
- mock_analyze.return_value = mock_response
-
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- result = client.analyze(request=mock_text_request_url)
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
- assert result.metadata is not None
-
- # Verify raw client was called with correct parameters
- mock_analyze.assert_called_once_with(
- request=mock_text_request_url,
- callback=None,
- callback_method=None,
- sentiment=None,
- summarize=None,
- tag=None,
- topics=None,
- custom_topic=None,
- custom_topic_mode=None,
- intents=None,
- custom_intent=None,
- custom_intent_mode=None,
- language=None,
- request_options=None
- )
-
- @patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze')
- def test_text_client_analyze_text_with_all_features(self, mock_analyze, sync_client_wrapper, mock_text_request_text, mock_text_analysis_response):
- """Test TextClient analyze method with text and all features enabled."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_text_analysis_response
- mock_analyze.return_value = mock_response
-
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- result = client.analyze(
- request=mock_text_request_text,
- sentiment=True,
- summarize=True,
- topics=True,
- custom_topic=["technology", "AI"],
- custom_topic_mode="extended",
- intents=True,
- custom_intent=["inform", "explain"],
- custom_intent_mode="strict",
- language="en"
- )
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
-
- # Verify raw client was called with all parameters
- mock_analyze.assert_called_once_with(
- request=mock_text_request_text,
- callback=None,
- callback_method=None,
- sentiment=True,
- summarize=True,
- tag=None,
- topics=True,
- custom_topic=["technology", "AI"],
- custom_topic_mode="extended",
- intents=True,
- custom_intent=["inform", "explain"],
- custom_intent_mode="strict",
- language="en",
- request_options=None
- )
-
- @patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze')
- def test_text_client_analyze_with_callback(self, mock_analyze, sync_client_wrapper, mock_text_request_url, mock_text_analysis_response):
- """Test TextClient analyze method with callback configuration."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_text_analysis_response
- mock_analyze.return_value = mock_response
-
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- callback_url = "https://example.com/callback"
- result = client.analyze(
- request=mock_text_request_url,
- callback=callback_url,
- callback_method="POST",
- sentiment=True
- )
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
-
- # Verify raw client was called with callback parameters
- mock_analyze.assert_called_once_with(
- request=mock_text_request_url,
- callback=callback_url,
- callback_method="POST",
- sentiment=True,
- summarize=None,
- tag=None,
- topics=None,
- custom_topic=None,
- custom_topic_mode=None,
- intents=None,
- custom_intent=None,
- custom_intent_mode=None,
- language=None,
- request_options=None
- )
-
- @patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze')
- def test_text_client_analyze_with_request_options(self, mock_analyze, sync_client_wrapper, mock_text_request_text, mock_text_analysis_response):
- """Test TextClient analyze method with request options."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_text_analysis_response
- mock_analyze.return_value = mock_response
-
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- request_options = RequestOptions(
- additional_headers={"X-Custom-Header": "test-value"}
- )
- result = client.analyze(
- request=mock_text_request_text,
- topics=True,
- request_options=request_options
- )
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
-
- # Verify raw client was called with request options
- mock_analyze.assert_called_once_with(
- request=mock_text_request_text,
- callback=None,
- callback_method=None,
- sentiment=None,
- summarize=None,
- tag=None,
- topics=True,
- custom_topic=None,
- custom_topic_mode=None,
- intents=None,
- custom_intent=None,
- custom_intent_mode=None,
- language=None,
- request_options=request_options
- )
-
- @patch('deepgram.read.v1.text.raw_client.AsyncRawTextClient.analyze')
- @pytest.mark.asyncio
- async def test_async_text_client_analyze_url(self, mock_analyze, async_client_wrapper, mock_text_request_url, mock_text_analysis_response):
- """Test AsyncTextClient analyze method with URL."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_text_analysis_response
- mock_analyze.return_value = mock_response
-
- client = AsyncTextClient(client_wrapper=async_client_wrapper)
-
- result = await client.analyze(request=mock_text_request_url)
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
- assert result.metadata is not None
-
- # Verify async raw client was called with correct parameters
- mock_analyze.assert_called_once_with(
- request=mock_text_request_url,
- callback=None,
- callback_method=None,
- sentiment=None,
- summarize=None,
- tag=None,
- topics=None,
- custom_topic=None,
- custom_topic_mode=None,
- intents=None,
- custom_intent=None,
- custom_intent_mode=None,
- language=None,
- request_options=None
- )
-
- @patch('deepgram.read.v1.text.raw_client.AsyncRawTextClient.analyze')
- @pytest.mark.asyncio
- async def test_async_text_client_analyze_with_all_features(self, mock_analyze, async_client_wrapper, mock_text_request_text, mock_text_analysis_response):
- """Test AsyncTextClient analyze method with all features enabled."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_text_analysis_response
- mock_analyze.return_value = mock_response
-
- client = AsyncTextClient(client_wrapper=async_client_wrapper)
-
- result = await client.analyze(
- request=mock_text_request_text,
- sentiment=True,
- summarize=True,
- topics=True,
- custom_topic="machine learning",
- custom_topic_mode="strict",
- intents=True,
- custom_intent=["question", "request"],
- custom_intent_mode="extended",
- language="en"
- )
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
-
- # Verify async raw client was called with all parameters
- mock_analyze.assert_called_once_with(
- request=mock_text_request_text,
- callback=None,
- callback_method=None,
- sentiment=True,
- summarize=True,
- tag=None,
- topics=True,
- custom_topic="machine learning",
- custom_topic_mode="strict",
- intents=True,
- custom_intent=["question", "request"],
- custom_intent_mode="extended",
- language="en",
- request_options=None
- )
-
-
-class TestReadIntegrationScenarios:
- """Test Read integration scenarios."""
-
- def test_complete_read_workflow_sync(self, mock_api_key):
- """Test complete Read workflow using sync client."""
- with patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze') as mock_analyze:
- # Mock the response
- mock_response = Mock()
- mock_response.data = Mock(spec=ReadV1Response)
- mock_response.data.metadata = Mock()
- mock_response.data.results = Mock()
- mock_response.data.results.summary = {"text": "Test summary"}
- mock_response.data.results.sentiments = {
- "average": {"sentiment": "positive", "sentiment_score": 0.7}
- }
- # Set request_id for assertion
- mock_response.data.request_id = "req-sync-123"
- mock_analyze.return_value = mock_response
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Create request
- request = ReadV1RequestTextParams(text="This is a test text for sentiment analysis.")
-
- # Access nested read functionality
- result = client.read.v1.text.analyze(
- request=request,
- sentiment=True,
- summarize=True
- )
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
- assert result.request_id == "req-sync-123"
-
- # Verify the call was made
- mock_analyze.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_read_workflow_async(self, mock_api_key):
- """Test complete Read workflow using async client."""
- with patch('deepgram.read.v1.text.raw_client.AsyncRawTextClient.analyze') as mock_analyze:
- # Mock the async response
- mock_response = Mock()
- mock_response.data = Mock(spec=ReadV1Response)
- mock_response.data.metadata = Mock()
- mock_response.data.results = Mock()
- mock_response.data.results.topics = {
- "segments": [
- {
- "topics": [{"topic": "technology", "confidence_score": 0.9}]
- }
- ]
- }
- # Set request_id for assertion
- mock_response.data.request_id = "req-async-456"
- mock_analyze.return_value = mock_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Create request
- request = ReadV1RequestUrlParams(url="https://example.com/tech-article.html")
-
- # Access nested read functionality
- result = await client.read.v1.text.analyze(
- request=request,
- topics=True,
- custom_topic=["AI", "machine learning"]
- )
-
- assert result is not None
- assert isinstance(result, ReadV1Response)
- assert result.request_id == "req-async-456"
-
- # Verify the call was made
- mock_analyze.assert_called_once()
-
- def test_read_client_property_isolation(self, mock_api_key):
- """Test that read clients are properly isolated between instances."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- read1 = client1.read
- read2 = client2.read
-
- # Verify they are different instances
- assert read1 is not read2
- assert read1._client_wrapper is not read2._client_wrapper
-
- # Verify nested clients are also different
- text1 = read1.v1.text
- text2 = read2.v1.text
-
- assert text1 is not text2
-
- @pytest.mark.asyncio
- async def test_mixed_sync_async_read_clients(self, mock_api_key):
- """Test mixing sync and async read clients."""
- sync_client = DeepgramClient(api_key=mock_api_key)
- async_client = AsyncDeepgramClient(api_key=mock_api_key)
-
- sync_read = sync_client.read
- async_read = async_client.read
-
- # Verify they are different types
- assert type(sync_read) != type(async_read)
- assert isinstance(sync_read, ReadClient)
- assert isinstance(async_read, AsyncReadClient)
-
- # Verify nested clients are also different types
- sync_text = sync_read.v1.text
- async_text = async_read.v1.text
-
- assert type(sync_text) != type(async_text)
- assert isinstance(sync_text, TextClient)
- assert isinstance(async_text, AsyncTextClient)
-
-
-class TestReadErrorHandling:
- """Test Read client error handling."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze')
- def test_text_client_api_error_handling(self, mock_analyze, sync_client_wrapper):
- """Test TextClient API error handling."""
- # Mock an API error
- mock_analyze.side_effect = ApiError(
- status_code=400,
- headers={},
- body="Invalid request parameters"
- )
-
- client = TextClient(client_wrapper=sync_client_wrapper)
- request = ReadV1RequestTextParams(text="Test text")
-
- with pytest.raises(ApiError) as exc_info:
- client.analyze(request=request)
-
- assert exc_info.value.status_code == 400
- assert "Invalid request parameters" in str(exc_info.value.body)
-
- @patch('deepgram.read.v1.text.raw_client.AsyncRawTextClient.analyze')
- @pytest.mark.asyncio
- async def test_async_text_client_api_error_handling(self, mock_analyze, async_client_wrapper):
- """Test AsyncTextClient API error handling."""
- # Mock an API error
- mock_analyze.side_effect = ApiError(
- status_code=429,
- headers={},
- body="Rate limit exceeded"
- )
-
- client = AsyncTextClient(client_wrapper=async_client_wrapper)
- request = ReadV1RequestUrlParams(url="https://example.com/article.html")
-
- with pytest.raises(ApiError) as exc_info:
- await client.analyze(request=request)
-
- assert exc_info.value.status_code == 429
- assert "Rate limit exceeded" in str(exc_info.value.body)
-
- @patch('deepgram.read.v1.text.raw_client.RawTextClient.analyze')
- def test_text_client_network_error_handling(self, mock_analyze, sync_client_wrapper):
- """Test TextClient network error handling."""
- # Mock a network error
- mock_analyze.side_effect = httpx.ConnectError("Connection failed")
-
- client = TextClient(client_wrapper=sync_client_wrapper)
- request = ReadV1RequestTextParams(text="Test text")
-
- with pytest.raises(httpx.ConnectError):
- client.analyze(request=request)
-
- @patch('deepgram.read.v1.text.raw_client.AsyncRawTextClient.analyze')
- @pytest.mark.asyncio
- async def test_async_text_client_network_error_handling(self, mock_analyze, async_client_wrapper):
- """Test AsyncTextClient network error handling."""
- # Mock a network error
- mock_analyze.side_effect = httpx.ConnectError("Async connection failed")
-
- client = AsyncTextClient(client_wrapper=async_client_wrapper)
- request = ReadV1RequestUrlParams(url="https://example.com/article.html")
-
- with pytest.raises(httpx.ConnectError):
- await client.analyze(request=request)
-
- def test_invalid_request_parameters(self, sync_client_wrapper):
- """Test handling of invalid request parameters."""
- client = TextClient(client_wrapper=sync_client_wrapper)
-
- # Test with invalid request (None)
- with pytest.raises((TypeError, AttributeError)):
- client.analyze(request=None)
-
- def test_client_wrapper_integration(self, sync_client_wrapper):
- """Test integration with client wrapper."""
- client = ReadClient(client_wrapper=sync_client_wrapper)
-
- # Test that client wrapper methods are accessible
- assert hasattr(client._client_wrapper, 'get_environment')
- assert hasattr(client._client_wrapper, 'get_headers')
- assert hasattr(client._client_wrapper, 'api_key')
-
- environment = client._client_wrapper.get_environment()
- headers = client._client_wrapper.get_headers()
- api_key = client._client_wrapper.api_key
-
- assert environment is not None
- assert isinstance(headers, dict)
- assert api_key is not None
diff --git a/tests/integrations/test_self_hosted_client.py b/tests/integrations/test_self_hosted_client.py
deleted file mode 100644
index a10f2c38..00000000
--- a/tests/integrations/test_self_hosted_client.py
+++ /dev/null
@@ -1,736 +0,0 @@
-"""Integration tests for SelfHosted client implementations."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch
-import httpx
-import json
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.environment import DeepgramClientEnvironment
-
-from deepgram.self_hosted.client import SelfHostedClient, AsyncSelfHostedClient
-from deepgram.self_hosted.v1.client import V1Client as SelfHostedV1Client, AsyncV1Client as SelfHostedAsyncV1Client
-from deepgram.self_hosted.v1.distribution_credentials.client import (
- DistributionCredentialsClient,
- AsyncDistributionCredentialsClient
-)
-
-# Import response types for mocking
-from deepgram.types.list_project_distribution_credentials_v1response import ListProjectDistributionCredentialsV1Response
-from deepgram.types.create_project_distribution_credentials_v1response import CreateProjectDistributionCredentialsV1Response
-from deepgram.types.get_project_distribution_credentials_v1response import GetProjectDistributionCredentialsV1Response
-from deepgram.self_hosted.v1.distribution_credentials.types.distribution_credentials_create_request_scopes_item import DistributionCredentialsCreateRequestScopesItem
-
-
-class TestSelfHostedClient:
- """Test cases for SelfHosted Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_self_hosted_client_initialization(self, sync_client_wrapper):
- """Test SelfHostedClient initialization."""
- client = SelfHostedClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_async_self_hosted_client_initialization(self, async_client_wrapper):
- """Test AsyncSelfHostedClient initialization."""
- client = AsyncSelfHostedClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._v1 is None # Lazy loaded
-
- def test_self_hosted_client_v1_property_lazy_loading(self, sync_client_wrapper):
- """Test SelfHostedClient v1 property lazy loading."""
- client = SelfHostedClient(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, SelfHostedV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_async_self_hosted_client_v1_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncSelfHostedClient v1 property lazy loading."""
- client = AsyncSelfHostedClient(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._v1 is None
-
- # Access triggers lazy loading
- v1_client = client.v1
- assert client._v1 is not None
- assert isinstance(v1_client, SelfHostedAsyncV1Client)
-
- # Subsequent access returns same instance
- assert client.v1 is v1_client
-
- def test_self_hosted_client_raw_response_access(self, sync_client_wrapper):
- """Test SelfHostedClient raw response access."""
- client = SelfHostedClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_self_hosted_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncSelfHostedClient raw response access."""
- client = AsyncSelfHostedClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_self_hosted_client_integration_with_main_client(self, mock_api_key):
- """Test SelfHostedClient integration with main DeepgramClient."""
- client = DeepgramClient(api_key=mock_api_key)
-
- self_hosted_client = client.self_hosted
- assert self_hosted_client is not None
- assert isinstance(self_hosted_client, SelfHostedClient)
-
- def test_async_self_hosted_client_integration_with_main_client(self, mock_api_key):
- """Test AsyncSelfHostedClient integration with main AsyncDeepgramClient."""
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- self_hosted_client = client.self_hosted
- assert self_hosted_client is not None
- assert isinstance(self_hosted_client, AsyncSelfHostedClient)
-
-
-class TestSelfHostedV1Client:
- """Test cases for SelfHosted V1 Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_self_hosted_v1_client_initialization(self, sync_client_wrapper):
- """Test SelfHostedV1Client initialization."""
- client = SelfHostedV1Client(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
- assert client._distribution_credentials is None # Lazy loaded
-
- def test_async_self_hosted_v1_client_initialization(self, async_client_wrapper):
- """Test AsyncSelfHostedV1Client initialization."""
- client = SelfHostedAsyncV1Client(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
- assert client._distribution_credentials is None # Lazy loaded
-
- def test_self_hosted_v1_client_distribution_credentials_property_lazy_loading(self, sync_client_wrapper):
- """Test SelfHostedV1Client distribution_credentials property lazy loading."""
- client = SelfHostedV1Client(client_wrapper=sync_client_wrapper)
-
- # Initially None
- assert client._distribution_credentials is None
-
- # Access triggers lazy loading
- dist_creds_client = client.distribution_credentials
- assert client._distribution_credentials is not None
- assert isinstance(dist_creds_client, DistributionCredentialsClient)
-
- # Subsequent access returns same instance
- assert client.distribution_credentials is dist_creds_client
-
- def test_async_self_hosted_v1_client_distribution_credentials_property_lazy_loading(self, async_client_wrapper):
- """Test AsyncSelfHostedV1Client distribution_credentials property lazy loading."""
- client = SelfHostedAsyncV1Client(client_wrapper=async_client_wrapper)
-
- # Initially None
- assert client._distribution_credentials is None
-
- # Access triggers lazy loading
- dist_creds_client = client.distribution_credentials
- assert client._distribution_credentials is not None
- assert isinstance(dist_creds_client, AsyncDistributionCredentialsClient)
-
- # Subsequent access returns same instance
- assert client.distribution_credentials is dist_creds_client
-
- def test_self_hosted_v1_client_raw_response_access(self, sync_client_wrapper):
- """Test SelfHostedV1Client raw response access."""
- client = SelfHostedV1Client(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_self_hosted_v1_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncSelfHostedV1Client raw response access."""
- client = SelfHostedAsyncV1Client(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
-
-class TestDistributionCredentialsClient:
- """Test cases for Distribution Credentials Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def mock_distribution_credentials_list_response(self):
- """Mock distribution credentials list response data."""
- mock_response = Mock(spec=ListProjectDistributionCredentialsV1Response)
- # Mock distribution credentials list
- mock_cred1 = Mock()
- mock_cred1.distribution_credentials_id = "cred-123"
- mock_cred2 = Mock()
- mock_cred2.distribution_credentials_id = "cred-456"
- mock_response.distribution_credentials = [mock_cred1, mock_cred2]
- return mock_response
-
- @pytest.fixture
- def mock_distribution_credentials_create_response(self):
- """Mock distribution credentials create response data."""
- mock_response = Mock(spec=CreateProjectDistributionCredentialsV1Response)
- mock_response.distribution_credentials_id = "cred-new-789"
- mock_response.username = "test_user"
- mock_response.password = "test_password"
- return mock_response
-
- @pytest.fixture
- def mock_distribution_credentials_get_response(self):
- """Mock distribution credentials get response data."""
- mock_response = Mock(spec=GetProjectDistributionCredentialsV1Response)
- mock_response.distribution_credentials_id = "cred-123"
- return mock_response
-
- def test_distribution_credentials_client_initialization(self, sync_client_wrapper):
- """Test DistributionCredentialsClient initialization."""
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_distribution_credentials_client_initialization(self, async_client_wrapper):
- """Test AsyncDistributionCredentialsClient initialization."""
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- assert client is not None
- assert client._raw_client is not None
-
- def test_distribution_credentials_client_raw_response_access(self, sync_client_wrapper):
- """Test DistributionCredentialsClient raw response access."""
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_distribution_credentials_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncDistributionCredentialsClient raw response access."""
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.list')
- def test_distribution_credentials_client_list(self, mock_list, sync_client_wrapper, mock_distribution_credentials_list_response):
- """Test DistributionCredentialsClient list method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_list_response
- mock_list.return_value = mock_response
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- project_id = "project-123"
- result = client.list(project_id)
-
- assert result is not None
- assert isinstance(result, ListProjectDistributionCredentialsV1Response)
- # Basic assertion - response is valid
- # Response structure is valid
-
- # Verify raw client was called with correct parameters
- mock_list.assert_called_once_with(project_id, request_options=None)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.create')
- def test_distribution_credentials_client_create(self, mock_create, sync_client_wrapper, mock_distribution_credentials_create_response):
- """Test DistributionCredentialsClient create method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_create_response
- mock_create.return_value = mock_response
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- project_id = "project-123"
- scopes = ["self-hosted:products", "self-hosted:product:api"]
- result = client.create(
- project_id,
- scopes=scopes,
- provider="quay",
- comment="Test credentials"
- )
-
- assert result is not None
- assert isinstance(result, CreateProjectDistributionCredentialsV1Response)
- assert result.distribution_credentials_id == "cred-new-789"
- assert result.username == "test_user"
- assert result.password == "test_password"
-
- # Verify raw client was called with correct parameters
- mock_create.assert_called_once_with(
- project_id,
- scopes=scopes,
- provider="quay",
- comment="Test credentials",
- request_options=None
- )
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.get')
- def test_distribution_credentials_client_get(self, mock_get, sync_client_wrapper, mock_distribution_credentials_get_response):
- """Test DistributionCredentialsClient get method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_get_response
- mock_get.return_value = mock_response
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- project_id = "project-123"
- credentials_id = "cred-123"
- result = client.get(project_id, credentials_id)
-
- assert result is not None
- assert isinstance(result, GetProjectDistributionCredentialsV1Response)
- # Basic assertions - the response structure is valid
-
- # Verify raw client was called with correct parameters
- mock_get.assert_called_once_with(project_id, credentials_id, request_options=None)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.delete')
- def test_distribution_credentials_client_delete(self, mock_delete, sync_client_wrapper, mock_distribution_credentials_get_response):
- """Test DistributionCredentialsClient delete method."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_get_response
- mock_delete.return_value = mock_response
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- project_id = "project-123"
- credentials_id = "cred-123"
- result = client.delete(project_id, credentials_id)
-
- assert result is not None
- assert isinstance(result, GetProjectDistributionCredentialsV1Response)
-
- # Verify raw client was called with correct parameters
- mock_delete.assert_called_once_with(project_id, credentials_id, request_options=None)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.list')
- def test_distribution_credentials_client_list_with_request_options(self, mock_list, sync_client_wrapper, mock_distribution_credentials_list_response):
- """Test DistributionCredentialsClient list with request options."""
- # Mock the raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_list_response
- mock_list.return_value = mock_response
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- project_id = "project-123"
- request_options = RequestOptions(
- additional_headers={"X-Custom-Header": "test-value"}
- )
- result = client.list(project_id, request_options=request_options)
-
- assert result is not None
- assert isinstance(result, ListProjectDistributionCredentialsV1Response)
-
- # Verify raw client was called with request options
- mock_list.assert_called_once_with(project_id, request_options=request_options)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.list')
- @pytest.mark.asyncio
- async def test_async_distribution_credentials_client_list(self, mock_list, async_client_wrapper, mock_distribution_credentials_list_response):
- """Test AsyncDistributionCredentialsClient list method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_list_response
- mock_list.return_value = mock_response
-
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- project_id = "project-456"
- result = await client.list(project_id)
-
- assert result is not None
- assert isinstance(result, ListProjectDistributionCredentialsV1Response)
- # Basic assertion - response is valid
- assert result.distribution_credentials[1].distribution_credentials_id == "cred-456"
-
- # Verify async raw client was called with correct parameters
- mock_list.assert_called_once_with(project_id, request_options=None)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.create')
- @pytest.mark.asyncio
- async def test_async_distribution_credentials_client_create(self, mock_create, async_client_wrapper, mock_distribution_credentials_create_response):
- """Test AsyncDistributionCredentialsClient create method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_create_response
- mock_create.return_value = mock_response
-
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- project_id = "project-456"
- scopes = ["self-hosted:products"]
- result = await client.create(
- project_id,
- scopes=scopes,
- provider="quay",
- comment="Async test credentials"
- )
-
- assert result is not None
- assert isinstance(result, CreateProjectDistributionCredentialsV1Response)
- assert result.distribution_credentials_id == "cred-new-789"
-
- # Verify async raw client was called with correct parameters
- mock_create.assert_called_once_with(
- project_id,
- scopes=scopes,
- provider="quay",
- comment="Async test credentials",
- request_options=None
- )
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.get')
- @pytest.mark.asyncio
- async def test_async_distribution_credentials_client_get(self, mock_get, async_client_wrapper, mock_distribution_credentials_get_response):
- """Test AsyncDistributionCredentialsClient get method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_get_response
- mock_get.return_value = mock_response
-
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- project_id = "project-456"
- credentials_id = "cred-456"
- result = await client.get(project_id, credentials_id)
-
- assert result is not None
- assert isinstance(result, GetProjectDistributionCredentialsV1Response)
- # Basic assertions - the response structure is valid # From mock response
-
- # Verify async raw client was called with correct parameters
- mock_get.assert_called_once_with(project_id, credentials_id, request_options=None)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.delete')
- @pytest.mark.asyncio
- async def test_async_distribution_credentials_client_delete(self, mock_delete, async_client_wrapper, mock_distribution_credentials_get_response):
- """Test AsyncDistributionCredentialsClient delete method."""
- # Mock the async raw client response
- mock_response = Mock()
- mock_response.data = mock_distribution_credentials_get_response
- mock_delete.return_value = mock_response
-
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- project_id = "project-456"
- credentials_id = "cred-456"
- result = await client.delete(project_id, credentials_id)
-
- assert result is not None
- assert isinstance(result, GetProjectDistributionCredentialsV1Response)
-
- # Verify async raw client was called with correct parameters
- mock_delete.assert_called_once_with(project_id, credentials_id, request_options=None)
-
-
-class TestSelfHostedIntegrationScenarios:
- """Test SelfHosted integration scenarios."""
-
- def test_complete_self_hosted_workflow_sync(self, mock_api_key):
- """Test complete SelfHosted workflow using sync client."""
- with patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.list') as mock_list:
- # Mock the response
- mock_response = Mock()
- mock_response.data = Mock(spec=ListProjectDistributionCredentialsV1Response)
- mock_credential = Mock()
- mock_credential.distribution_credentials_id = "cred-sync-123"
- mock_credential.comment = "Sync test credentials"
- mock_credential.scopes = ["read", "write"]
- mock_credential.provider = "quay"
- mock_response.data.distribution_credentials = [mock_credential]
- mock_list.return_value = mock_response
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access nested self-hosted functionality
- result = client.self_hosted.v1.distribution_credentials.list("project-123")
-
- assert result is not None
- assert isinstance(result, ListProjectDistributionCredentialsV1Response)
- assert len(result.distribution_credentials) == 1
- assert result.distribution_credentials[0].distribution_credentials_id == "cred-sync-123"
-
- # Verify the call was made
- mock_list.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_self_hosted_workflow_async(self, mock_api_key):
- """Test complete SelfHosted workflow using async client."""
- with patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.create') as mock_create:
- # Mock the async response
- mock_response = Mock()
- mock_response.data = Mock(spec=CreateProjectDistributionCredentialsV1Response)
- mock_response.data.distribution_credentials_id = "cred-async-456"
- mock_response.data.comment = "Async test credentials"
- mock_response.data.scopes = ["read"]
- mock_response.data.provider = "quay"
- mock_response.data.username = "async_user"
- mock_response.data.password = "async_password"
- # Set required fields
- mock_response.data.member = Mock()
- mock_response.data.distribution_credentials = Mock()
- mock_create.return_value = mock_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access nested self-hosted functionality
- result = await client.self_hosted.v1.distribution_credentials.create(
- "project-456",
- scopes=["self-hosted:products"],
- provider="quay"
- )
-
- assert result is not None
- assert isinstance(result, CreateProjectDistributionCredentialsV1Response)
- assert result.distribution_credentials_id == "cred-async-456"
- assert result.username == "async_user"
-
- # Verify the call was made
- mock_create.assert_called_once()
-
- def test_self_hosted_client_property_isolation(self, mock_api_key):
- """Test that self-hosted clients are properly isolated between instances."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- self_hosted1 = client1.self_hosted
- self_hosted2 = client2.self_hosted
-
- # Verify they are different instances
- assert self_hosted1 is not self_hosted2
- assert self_hosted1._client_wrapper is not self_hosted2._client_wrapper
-
- # Verify nested clients are also different
- dist_creds1 = self_hosted1.v1.distribution_credentials
- dist_creds2 = self_hosted2.v1.distribution_credentials
-
- assert dist_creds1 is not dist_creds2
-
- @pytest.mark.asyncio
- async def test_mixed_sync_async_self_hosted_clients(self, mock_api_key):
- """Test mixing sync and async self-hosted clients."""
- sync_client = DeepgramClient(api_key=mock_api_key)
- async_client = AsyncDeepgramClient(api_key=mock_api_key)
-
- sync_self_hosted = sync_client.self_hosted
- async_self_hosted = async_client.self_hosted
-
- # Verify they are different types
- assert type(sync_self_hosted) != type(async_self_hosted)
- assert isinstance(sync_self_hosted, SelfHostedClient)
- assert isinstance(async_self_hosted, AsyncSelfHostedClient)
-
- # Verify nested clients are also different types
- sync_dist_creds = sync_self_hosted.v1.distribution_credentials
- async_dist_creds = async_self_hosted.v1.distribution_credentials
-
- assert type(sync_dist_creds) != type(async_dist_creds)
- assert isinstance(sync_dist_creds, DistributionCredentialsClient)
- assert isinstance(async_dist_creds, AsyncDistributionCredentialsClient)
-
-
-class TestSelfHostedErrorHandling:
- """Test SelfHosted client error handling."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock(spec=httpx.Client)
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock(spec=httpx.AsyncClient)
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.list')
- def test_distribution_credentials_client_api_error_handling(self, mock_list, sync_client_wrapper):
- """Test DistributionCredentialsClient API error handling."""
- # Mock an API error
- mock_list.side_effect = ApiError(
- status_code=404,
- headers={},
- body="Project not found"
- )
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- client.list("non-existent-project")
-
- assert exc_info.value.status_code == 404
- assert "Project not found" in str(exc_info.value.body)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.create')
- @pytest.mark.asyncio
- async def test_async_distribution_credentials_client_api_error_handling(self, mock_create, async_client_wrapper):
- """Test AsyncDistributionCredentialsClient API error handling."""
- # Mock an API error
- mock_create.side_effect = ApiError(
- status_code=400,
- headers={},
- body="Invalid scopes provided"
- )
-
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- with pytest.raises(ApiError) as exc_info:
- await client.create(
- "project-123",
- scopes=["invalid_scope"],
- provider="quay"
- )
-
- assert exc_info.value.status_code == 400
- assert "Invalid scopes provided" in str(exc_info.value.body)
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.RawDistributionCredentialsClient.get')
- def test_distribution_credentials_client_network_error_handling(self, mock_get, sync_client_wrapper):
- """Test DistributionCredentialsClient network error handling."""
- # Mock a network error
- mock_get.side_effect = httpx.ConnectError("Connection failed")
-
- client = DistributionCredentialsClient(client_wrapper=sync_client_wrapper)
-
- with pytest.raises(httpx.ConnectError):
- client.get("project-123", "cred-123")
-
- @patch('deepgram.self_hosted.v1.distribution_credentials.raw_client.AsyncRawDistributionCredentialsClient.delete')
- @pytest.mark.asyncio
- async def test_async_distribution_credentials_client_network_error_handling(self, mock_delete, async_client_wrapper):
- """Test AsyncDistributionCredentialsClient network error handling."""
- # Mock a network error
- mock_delete.side_effect = httpx.ConnectError("Async connection failed")
-
- client = AsyncDistributionCredentialsClient(client_wrapper=async_client_wrapper)
-
- with pytest.raises(httpx.ConnectError):
- await client.delete("project-456", "cred-456")
-
- def test_client_wrapper_integration(self, sync_client_wrapper):
- """Test integration with client wrapper."""
- client = SelfHostedClient(client_wrapper=sync_client_wrapper)
-
- # Test that client wrapper methods are accessible
- assert hasattr(client._client_wrapper, 'get_environment')
- assert hasattr(client._client_wrapper, 'get_headers')
- assert hasattr(client._client_wrapper, 'api_key')
-
- environment = client._client_wrapper.get_environment()
- headers = client._client_wrapper.get_headers()
- api_key = client._client_wrapper.api_key
-
- assert environment is not None
- assert isinstance(headers, dict)
- assert api_key is not None
diff --git a/tests/integrations/test_speak_client.py b/tests/integrations/test_speak_client.py
deleted file mode 100644
index 2b722f3a..00000000
--- a/tests/integrations/test_speak_client.py
+++ /dev/null
@@ -1,763 +0,0 @@
-"""Integration tests for Speak client implementations."""
-
-import pytest
-from unittest.mock import Mock, AsyncMock, patch, MagicMock
-from contextlib import contextmanager, asynccontextmanager
-import httpx
-import websockets.exceptions
-import json
-import asyncio
-from json.decoder import JSONDecodeError
-
-from deepgram import DeepgramClient, AsyncDeepgramClient
-from deepgram.core.client_wrapper import SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.api_error import ApiError
-from deepgram.core.request_options import RequestOptions
-from deepgram.core.events import EventType
-from deepgram.environment import DeepgramClientEnvironment
-
-# Import Speak clients
-from deepgram.speak.client import SpeakClient, AsyncSpeakClient
-from deepgram.speak.v1.client import V1Client as SpeakV1Client, AsyncV1Client as SpeakAsyncV1Client
-
-# Import Speak raw clients
-from deepgram.speak.v1.raw_client import RawV1Client as SpeakRawV1Client, AsyncRawV1Client as SpeakAsyncRawV1Client
-
-# Import Speak socket clients
-from deepgram.speak.v1.socket_client import V1SocketClient as SpeakV1SocketClient, AsyncV1SocketClient as SpeakAsyncV1SocketClient
-
-# Import Speak audio clients
-from deepgram.speak.v1.audio.client import AudioClient, AsyncAudioClient
-
-# Import socket message types
-from deepgram.extensions.types.sockets import (
- SpeakV1TextMessage,
- SpeakV1ControlMessage,
-)
-
-# Import request and response types for mocking
-from deepgram.speak.v1.audio.types.audio_generate_request_callback_method import AudioGenerateRequestCallbackMethod
-from deepgram.speak.v1.audio.types.audio_generate_request_container import AudioGenerateRequestContainer
-from deepgram.speak.v1.audio.types.audio_generate_request_encoding import AudioGenerateRequestEncoding
-from deepgram.speak.v1.audio.types.audio_generate_request_model import AudioGenerateRequestModel
-
-
-class TestSpeakClient:
- """Test cases for Speak Client."""
-
- def test_speak_client_initialization(self, mock_api_key):
- """Test SpeakClient initialization."""
- client = DeepgramClient(api_key=mock_api_key).speak
- assert client is not None
- assert hasattr(client, 'v1')
-
- def test_async_speak_client_initialization(self, mock_api_key):
- """Test AsyncSpeakClient initialization."""
- client = AsyncDeepgramClient(api_key=mock_api_key).speak
- assert client is not None
- assert hasattr(client, 'v1')
-
- def test_speak_client_with_raw_response(self, mock_api_key):
- """Test SpeakClient with_raw_response property."""
- client = DeepgramClient(api_key=mock_api_key).speak
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert hasattr(raw_client, '_client_wrapper')
-
- def test_async_speak_client_with_raw_response(self, mock_api_key):
- """Test AsyncSpeakClient with_raw_response property."""
- client = AsyncDeepgramClient(api_key=mock_api_key).speak
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert hasattr(raw_client, '_client_wrapper')
-
-
-class TestSpeakRawV1Client:
- """Test cases for Speak V1 Raw Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=Mock(),
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=AsyncMock(),
- timeout=60.0
- )
-
- def test_sync_speak_raw_client_initialization(self, sync_client_wrapper):
- """Test synchronous speak raw client initialization."""
- client = SpeakRawV1Client(client_wrapper=sync_client_wrapper)
- assert client is not None
- assert client._client_wrapper is sync_client_wrapper
-
- def test_async_speak_raw_client_initialization(self, async_client_wrapper):
- """Test asynchronous speak raw client initialization."""
- client = SpeakAsyncRawV1Client(client_wrapper=async_client_wrapper)
- assert client is not None
- assert client._client_wrapper is async_client_wrapper
-
- @patch('deepgram.speak.v1.raw_client.websockets_sync_client.connect')
- def test_sync_speak_connect_success(self, mock_websocket_connect, sync_client_wrapper, mock_websocket):
- """Test successful synchronous Speak WebSocket connection."""
- mock_websocket_connect.return_value.__enter__ = Mock(return_value=mock_websocket)
- mock_websocket_connect.return_value.__exit__ = Mock(return_value=None)
-
- client = SpeakRawV1Client(client_wrapper=sync_client_wrapper)
-
- with client.connect() as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
- @patch('deepgram.speak.v1.raw_client.websockets_sync_client.connect')
- def test_sync_speak_connect_with_parameters(self, mock_websocket_connect, sync_client_wrapper, mock_websocket):
- """Test synchronous Speak connection with parameters."""
- mock_websocket_connect.return_value.__enter__ = Mock(return_value=mock_websocket)
- mock_websocket_connect.return_value.__exit__ = Mock(return_value=None)
-
- client = SpeakRawV1Client(client_wrapper=sync_client_wrapper)
-
- with client.connect(
- model="aura-asteria-en",
- encoding="linear16",
- sample_rate="24000"
- ) as connection:
- assert connection is not None
-
- @patch('deepgram.speak.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_speak_connect_success(self, mock_websocket_connect, async_client_wrapper, mock_async_websocket):
- """Test successful asynchronous Speak WebSocket connection."""
- mock_websocket_connect.return_value.__aenter__ = AsyncMock(return_value=mock_async_websocket)
- mock_websocket_connect.return_value.__aexit__ = AsyncMock(return_value=None)
-
- client = SpeakAsyncRawV1Client(client_wrapper=async_client_wrapper)
-
- async with client.connect() as connection:
- assert connection is not None
- assert hasattr(connection, '_websocket')
-
- def test_speak_query_params_construction(self, sync_client_wrapper):
- """Test Speak query parameters are properly constructed."""
- client = SpeakRawV1Client(client_wrapper=sync_client_wrapper)
-
- # Mock the websocket connection to capture the URL
- with patch('websockets.sync.client.connect') as mock_connect:
- mock_connect.return_value.__enter__ = Mock(return_value=Mock())
- mock_connect.return_value.__exit__ = Mock(return_value=None)
-
- try:
- with client.connect(
- model="aura-asteria-en",
- encoding="linear16",
- sample_rate="24000"
- ) as connection:
- pass
- except:
- pass # We just want to check the URL construction
-
- # Verify the URL was constructed with query parameters
- call_args = mock_connect.call_args
- if call_args and len(call_args[0]) > 0:
- url = call_args[0][0]
- assert "model=aura-asteria-en" in url
- assert "encoding=linear16" in url
- assert "sample_rate=24000" in url
-
-
-class TestSpeakV1SocketClient:
- """Test cases for Speak V1 Socket Client."""
-
- def test_speak_sync_socket_client_initialization(self):
- """Test Speak synchronous socket client initialization."""
- mock_ws = Mock()
- client = SpeakV1SocketClient(websocket=mock_ws)
-
- assert client is not None
- assert client._websocket is mock_ws
-
- def test_speak_async_socket_client_initialization(self):
- """Test Speak asynchronous socket client initialization."""
- mock_ws = AsyncMock()
- client = SpeakAsyncV1SocketClient(websocket=mock_ws)
-
- assert client is not None
- assert client._websocket is mock_ws
-
- def test_speak_sync_send_text(self):
- """Test Speak synchronous text message sending."""
- mock_ws = Mock()
- client = SpeakV1SocketClient(websocket=mock_ws)
-
- # Mock text message
- mock_text_msg = Mock(spec=SpeakV1TextMessage)
- mock_text_msg.dict.return_value = {"type": "Speak", "text": "Hello world"}
-
- client.send_text(mock_text_msg)
-
- mock_text_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- def test_speak_sync_send_control(self):
- """Test Speak synchronous control message sending."""
- mock_ws = Mock()
- client = SpeakV1SocketClient(websocket=mock_ws)
-
- # Mock control message
- mock_control_msg = Mock(spec=SpeakV1ControlMessage)
- mock_control_msg.dict.return_value = {"type": "Flush"}
-
- client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_speak_async_send_text(self):
- """Test Speak asynchronous text message sending."""
- mock_ws = AsyncMock()
- client = SpeakAsyncV1SocketClient(websocket=mock_ws)
-
- # Mock text message
- mock_text_msg = Mock(spec=SpeakV1TextMessage)
- mock_text_msg.dict.return_value = {"type": "Speak", "text": "Hello world"}
-
- await client.send_text(mock_text_msg)
-
- mock_text_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_speak_async_send_control(self):
- """Test Speak asynchronous control message sending."""
- mock_ws = AsyncMock()
- client = SpeakAsyncV1SocketClient(websocket=mock_ws)
-
- # Mock control message
- mock_control_msg = Mock(spec=SpeakV1ControlMessage)
- mock_control_msg.dict.return_value = {"type": "Flush"}
-
- await client.send_control(mock_control_msg)
-
- mock_control_msg.dict.assert_called_once()
- mock_ws.send.assert_called_once()
-
-
-class TestSpeakAudioClient:
- """Test cases for Speak Audio Client."""
-
- @pytest.fixture
- def sync_client_wrapper(self, mock_api_key):
- """Create a sync client wrapper for testing."""
- mock_httpx_client = Mock()
- return SyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def async_client_wrapper(self, mock_api_key):
- """Create an async client wrapper for testing."""
- mock_httpx_client = AsyncMock()
- return AsyncClientWrapper(
- environment=DeepgramClientEnvironment.PRODUCTION,
- api_key=mock_api_key,
- headers={},
- httpx_client=mock_httpx_client,
- timeout=60.0
- )
-
- @pytest.fixture
- def sample_audio_chunks(self):
- """Sample audio chunks for testing."""
- return [
- b'\x00\x01\x02\x03\x04\x05',
- b'\x06\x07\x08\x09\x0a\x0b',
- b'\x0c\x0d\x0e\x0f\x10\x11'
- ]
-
- def test_audio_client_initialization(self, sync_client_wrapper):
- """Test AudioClient initialization."""
- client = AudioClient(client_wrapper=sync_client_wrapper)
- assert client is not None
- assert client._raw_client is not None
-
- def test_async_audio_client_initialization(self, async_client_wrapper):
- """Test AsyncAudioClient initialization."""
- client = AsyncAudioClient(client_wrapper=async_client_wrapper)
- assert client is not None
- assert client._raw_client is not None
-
- def test_audio_client_raw_response_access(self, sync_client_wrapper):
- """Test AudioClient raw response access."""
- client = AudioClient(client_wrapper=sync_client_wrapper)
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- def test_async_audio_client_raw_response_access(self, async_client_wrapper):
- """Test AsyncAudioClient raw response access."""
- client = AsyncAudioClient(client_wrapper=async_client_wrapper)
- raw_client = client.with_raw_response
- assert raw_client is not None
- assert raw_client is client._raw_client
-
- @patch('deepgram.speak.v1.audio.raw_client.RawAudioClient.generate')
- def test_audio_client_generate(self, mock_generate, sync_client_wrapper, sample_audio_chunks):
- """Test AudioClient generate method."""
- # Mock the raw client response with context manager
- mock_response = Mock()
- mock_data_response = Mock()
- mock_data_response.data = iter(sample_audio_chunks)
- mock_response.__enter__ = Mock(return_value=mock_data_response)
- mock_response.__exit__ = Mock(return_value=None)
- mock_generate.return_value = mock_response
-
- client = AudioClient(client_wrapper=sync_client_wrapper)
-
- response = client.generate(
- text="Hello, world!",
- model="aura-asteria-en"
- )
- audio_chunks = list(response)
- assert len(audio_chunks) == 3
- assert audio_chunks[0] == sample_audio_chunks[0]
-
- # Verify the call was made
- mock_generate.assert_called_once()
-
- @patch('deepgram.speak.v1.audio.raw_client.RawAudioClient.generate')
- def test_audio_client_generate_with_all_options(self, mock_generate, sync_client_wrapper, sample_audio_chunks):
- """Test AudioClient generate with all options."""
- # Mock the raw client response with context manager
- mock_response = Mock()
- mock_data_response = Mock()
- mock_data_response.data = iter(sample_audio_chunks)
- mock_response.__enter__ = Mock(return_value=mock_data_response)
- mock_response.__exit__ = Mock(return_value=None)
- mock_generate.return_value = mock_response
-
- client = AudioClient(client_wrapper=sync_client_wrapper)
-
- response = client.generate(
- text="Hello, world!",
- model="aura-asteria-en",
- encoding="linear16",
- container="wav",
- sample_rate=22050,
- callback="https://example.com/callback",
- callback_method="POST"
- )
- audio_chunks = list(response)
- assert len(audio_chunks) == 3
-
- # Verify the call was made with all parameters
- mock_generate.assert_called_once()
- call_args = mock_generate.call_args
- assert "model" in call_args[1]
- assert "encoding" in call_args[1]
- assert "sample_rate" in call_args[1]
-
- @patch('deepgram.speak.v1.audio.raw_client.AsyncRawAudioClient.generate')
- @pytest.mark.asyncio
- async def test_async_audio_client_generate(self, mock_generate, async_client_wrapper, sample_audio_chunks):
- """Test AsyncAudioClient generate method."""
- # Mock the async raw client response with context manager
- mock_response = AsyncMock()
- mock_data_response = AsyncMock()
-
- async def mock_aiter_data():
- for chunk in sample_audio_chunks:
- yield chunk
-
- mock_data_response.data = mock_aiter_data()
- mock_response.__aenter__ = AsyncMock(return_value=mock_data_response)
- mock_response.__aexit__ = AsyncMock(return_value=None)
- mock_generate.return_value = mock_response
-
- client = AsyncAudioClient(client_wrapper=async_client_wrapper)
-
- response = client.generate(
- text="Hello, world!",
- model="aura-asteria-en"
- )
- audio_chunks = []
- async for chunk in response:
- audio_chunks.append(chunk)
-
- assert len(audio_chunks) == 3
- assert audio_chunks[0] == sample_audio_chunks[0]
-
- # Verify the call was made
- mock_generate.assert_called_once()
-
- @patch('deepgram.speak.v1.audio.raw_client.AsyncRawAudioClient.generate')
- @pytest.mark.asyncio
- async def test_async_audio_client_generate_with_options(self, mock_generate, async_client_wrapper, sample_audio_chunks):
- """Test AsyncAudioClient generate with options."""
- # Mock the async raw client response with context manager
- mock_response = AsyncMock()
- mock_data_response = AsyncMock()
-
- async def mock_aiter_data():
- for chunk in sample_audio_chunks:
- yield chunk
-
- mock_data_response.data = mock_aiter_data()
- mock_response.__aenter__ = AsyncMock(return_value=mock_data_response)
- mock_response.__aexit__ = AsyncMock(return_value=None)
- mock_generate.return_value = mock_response
-
- client = AsyncAudioClient(client_wrapper=async_client_wrapper)
-
- response = client.generate(
- text="Hello, world!",
- model="aura-asteria-en",
- encoding="linear16",
- sample_rate=22050
- )
- audio_chunks = []
- async for chunk in response:
- audio_chunks.append(chunk)
-
- assert len(audio_chunks) == 3
-
- # Verify the call was made
- mock_generate.assert_called_once()
- call_args = mock_generate.call_args
-
- assert call_args[1]["sample_rate"] == 22050
-
-
-class TestSpeakIntegrationScenarios:
- """Test Speak API integration scenarios."""
-
- @patch('deepgram.speak.v1.raw_client.websockets_sync_client.connect')
- def test_speak_tts_workflow(self, mock_websocket_connect, mock_api_key, sample_text):
- """Test complete Speak TTS workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.recv = Mock(side_effect=[
- b'\x00\x01\x02\x03', # Audio chunk
- '{"type": "Metadata", "request_id": "speak-123", "model_name": "aura-asteria-en", "model_version": "1.0", "model_uuid": "uuid-123"}'
- ])
- mock_ws.__iter__ = Mock(return_value=iter([
- b'\x00\x01\x02\x03', # Audio chunk
- '{"type": "Metadata", "request_id": "speak-123", "model_name": "aura-asteria-en", "model_version": "1.0", "model_uuid": "uuid-123"}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Connect and send text
- with client.speak.v1.with_raw_response.connect() as connection:
- # Send text message
- connection.send_text(Mock())
-
- # Send control message
- connection.send_control(Mock())
-
- # Receive audio data
- result = connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- @patch('deepgram.speak.v1.socket_client.V1SocketClient._handle_json_message')
- @patch('deepgram.speak.v1.raw_client.websockets_sync_client.connect')
- def test_speak_event_driven_workflow(self, mock_websocket_connect, mock_handle_json, mock_api_key):
- """Test Speak event-driven workflow."""
- # Mock websocket connection
- mock_ws = Mock()
- mock_ws.send = Mock()
- mock_ws.__iter__ = Mock(return_value=iter([
- '{"type": "Metadata", "request_id": "speak-event-123"}'
- ]))
- mock_ws.__enter__ = Mock(return_value=mock_ws)
- mock_ws.__exit__ = Mock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Mock the JSON message handler to return simple objects
- mock_handle_json.return_value = {"type": "Metadata", "request_id": "speak-event-123"}
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Mock event handlers
- on_open = Mock()
- on_message = Mock()
- on_close = Mock()
- on_error = Mock()
-
- # Connect with event handlers
- with client.speak.v1.with_raw_response.connect() as connection:
- # Set up event handlers
- connection.on(EventType.OPEN, on_open)
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, on_close)
- connection.on(EventType.ERROR, on_error)
-
- # Start listening (this will process the mock messages)
- connection.start_listening()
-
- # Verify event handlers were set up
- assert hasattr(connection, 'on')
-
- @patch('deepgram.speak.v1.raw_client.websockets_client_connect')
- @pytest.mark.asyncio
- async def test_async_speak_tts_workflow(self, mock_websocket_connect, mock_api_key):
- """Test async Speak TTS workflow."""
- # Mock async websocket connection
- mock_ws = AsyncMock()
- mock_ws.send = AsyncMock()
- mock_ws.recv = AsyncMock(side_effect=[
- b'\x00\x01\x02\x03', # Audio chunk
- '{"type": "Metadata", "request_id": "async-speak-123"}'
- ])
-
- async def mock_aiter():
- yield b'\x00\x01\x02\x03' # Audio chunk
- yield '{"type": "Metadata", "request_id": "async-speak-123"}'
-
- mock_ws.__aiter__ = Mock(return_value=mock_aiter())
- mock_ws.__aenter__ = AsyncMock(return_value=mock_ws)
- mock_ws.__aexit__ = AsyncMock(return_value=None)
- mock_websocket_connect.return_value = mock_ws
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Connect and send text
- async with client.speak.v1.with_raw_response.connect() as connection:
- # Send text message
- await connection.send_text(Mock())
-
- # Send control message
- await connection.send_control(Mock())
-
- # Receive audio data
- result = await connection.recv()
- assert result is not None
-
- # Verify websocket operations
- mock_ws.send.assert_called()
-
- def test_complete_speak_audio_workflow_sync(self, mock_api_key):
- """Test complete Speak Audio workflow using sync client."""
- with patch('deepgram.speak.v1.audio.raw_client.RawAudioClient.generate') as mock_generate:
- # Mock the response with context manager
- mock_response = Mock()
- mock_data_response = Mock()
- mock_data_response.data = iter([
- b'\x00\x01\x02\x03',
- b'\x04\x05\x06\x07',
- b'\x08\x09\x0a\x0b'
- ])
- mock_response.__enter__ = Mock(return_value=mock_data_response)
- mock_response.__exit__ = Mock(return_value=None)
- mock_generate.return_value = mock_response
-
- # Initialize client
- client = DeepgramClient(api_key=mock_api_key)
-
- # Access nested speak audio functionality
- response = client.speak.v1.audio.generate(
- text="Hello, this is a test of the Deepgram TTS API.",
- model="aura-asteria-en",
- encoding="linear16",
- sample_rate=24000
- )
- audio_chunks = list(response)
- assert len(audio_chunks) == 3
- assert audio_chunks[0] == b'\x00\x01\x02\x03'
-
- # Verify the call was made
- mock_generate.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_complete_speak_audio_workflow_async(self, mock_api_key):
- """Test complete Speak Audio workflow using async client."""
- with patch('deepgram.speak.v1.audio.raw_client.AsyncRawAudioClient.generate') as mock_generate:
- # Mock the async response with context manager
- mock_response = AsyncMock()
- mock_data_response = AsyncMock()
-
- async def mock_aiter_data():
- yield b'\x00\x01\x02\x03'
- yield b'\x04\x05\x06\x07'
- yield b'\x08\x09\x0a\x0b'
-
- mock_data_response.data = mock_aiter_data()
- mock_response.__aenter__ = AsyncMock(return_value=mock_data_response)
- mock_response.__aexit__ = AsyncMock(return_value=None)
- mock_generate.return_value = mock_response
-
- # Initialize async client
- client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Access nested speak audio functionality
- response = client.speak.v1.audio.generate(
- text="Hello, this is an async test of the Deepgram TTS API.",
- model="aura-asteria-en",
- encoding="linear16"
- )
- audio_chunks = []
- async for chunk in response:
- audio_chunks.append(chunk)
-
- assert len(audio_chunks) == 3
- assert audio_chunks[0] == b'\x00\x01\x02\x03'
-
- # Verify the call was made
- mock_generate.assert_called_once()
-
- def test_speak_client_property_isolation(self, mock_api_key):
- """Test that speak clients are properly isolated between instances."""
- client1 = DeepgramClient(api_key=mock_api_key)
- client2 = DeepgramClient(api_key=mock_api_key)
-
- # Verify clients are different instances
- assert client1.speak is not client2.speak
-
- # Verify nested clients are also different
- speak1 = client1.speak.v1
- speak2 = client2.speak.v1
-
- assert speak1 is not speak2
-
- @pytest.mark.asyncio
- async def test_mixed_sync_async_speak_clients(self, mock_api_key):
- """Test mixing sync and async speak clients."""
- sync_client = DeepgramClient(api_key=mock_api_key)
- async_client = AsyncDeepgramClient(api_key=mock_api_key)
-
- # Verify clients are different types
- assert type(sync_client.speak) != type(async_client.speak)
-
- # Verify nested clients are also different types
- sync_speak = sync_client.speak.v1
- async_speak = async_client.speak.v1
-
- assert type(sync_speak) != type(async_speak)
- assert isinstance(sync_speak, SpeakV1Client)
- assert isinstance(async_speak, SpeakAsyncV1Client)
-
-
-class TestSpeakErrorHandling:
- """Test Speak client error handling."""
-
- @patch('deepgram.speak.v1.audio.raw_client.RawAudioClient.generate')
- def test_audio_client_api_error_handling(self, mock_generate, mock_api_key):
- """Test AudioClient API error handling."""
- # Mock an API error
- mock_generate.side_effect = ApiError(
- status_code=400,
- headers={},
- body="Invalid request parameters"
- )
-
- client = DeepgramClient(api_key=mock_api_key).speak.v1.audio
-
- with pytest.raises(ApiError) as exc_info:
- response = client.generate(text="Hello world")
- list(response)
-
- assert exc_info.value.status_code == 400
- assert "Invalid request parameters" in str(exc_info.value.body)
-
- @patch('deepgram.speak.v1.audio.raw_client.AsyncRawAudioClient.generate')
- @pytest.mark.asyncio
- async def test_async_audio_client_api_error_handling(self, mock_generate, mock_api_key):
- """Test AsyncAudioClient API error handling."""
- # Mock an API error
- mock_generate.side_effect = ApiError(
- status_code=429,
- headers={},
- body="Rate limit exceeded"
- )
-
- client = AsyncDeepgramClient(api_key=mock_api_key).speak.v1.audio
-
- with pytest.raises(ApiError) as exc_info:
- response = client.generate(text="Hello world")
- async for chunk in response:
- pass
-
- assert exc_info.value.status_code == 429
- assert "Rate limit exceeded" in str(exc_info.value.body)
-
- @patch('deepgram.speak.v1.audio.raw_client.RawAudioClient.generate')
- def test_audio_client_network_error_handling(self, mock_generate, mock_api_key):
- """Test AudioClient network error handling."""
- # Mock a network error
- mock_generate.side_effect = httpx.ConnectError("Connection failed")
-
- client = DeepgramClient(api_key=mock_api_key).speak.v1.audio
-
- with pytest.raises(httpx.ConnectError):
- response = client.generate(text="Hello world")
- list(response)
-
- @patch('deepgram.speak.v1.audio.raw_client.AsyncRawAudioClient.generate')
- @pytest.mark.asyncio
- async def test_async_audio_client_network_error_handling(self, mock_generate, mock_api_key):
- """Test AsyncAudioClient network error handling."""
- # Mock a network error
- mock_generate.side_effect = httpx.ConnectError("Async connection failed")
-
- client = AsyncDeepgramClient(api_key=mock_api_key).speak.v1.audio
-
- with pytest.raises(httpx.ConnectError):
- response = client.generate(text="Hello world")
- async for chunk in response:
- pass
-
- @patch('deepgram.speak.v1.raw_client.websockets_sync_client.connect')
- def test_websocket_connection_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test WebSocket connection error handling."""
- mock_websocket_connect.side_effect = websockets.exceptions.ConnectionClosedError(None, None)
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(websockets.exceptions.ConnectionClosedError):
- with client.speak.v1.with_raw_response.connect() as connection:
- pass
-
- @patch('deepgram.speak.v1.raw_client.websockets_sync_client.connect')
- def test_generic_websocket_error_handling(self, mock_websocket_connect, mock_api_key):
- """Test generic WebSocket error handling."""
- mock_websocket_connect.side_effect = Exception("Generic WebSocket error")
-
- client = DeepgramClient(api_key=mock_api_key)
-
- with pytest.raises(Exception) as exc_info:
- with client.speak.v1.with_raw_response.connect() as connection:
- pass
-
- assert "Generic WebSocket error" in str(exc_info.value)
-
- def test_client_wrapper_integration(self, mock_api_key):
- """Test integration with client wrapper."""
- client = DeepgramClient(api_key=mock_api_key).speak.v1.audio
- assert client._raw_client is not None
- assert client._raw_client._client_wrapper.api_key == mock_api_key
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
deleted file mode 100644
index 535fa1c9..00000000
--- a/tests/unit/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Unit tests for Deepgram Python SDK
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
deleted file mode 100644
index 87ed0698..00000000
--- a/tests/unit/conftest.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Shared fixtures and configuration for unit tests.
-"""
-from typing import Any, Dict
-
-import pytest
-
-
-@pytest.fixture
-def sample_timestamp():
- """Sample timestamp for testing."""
- return "2023-01-01T00:00:00Z"
-
-
-@pytest.fixture
-def sample_request_id():
- """Sample request ID for testing."""
- return "test-request-123"
-
-
-@pytest.fixture
-def sample_channel_data():
- """Sample channel data for testing."""
- return [0, 1]
-
-
-@pytest.fixture
-def sample_audio_data():
- """Sample binary audio data for testing."""
- return b"\x00\x01\x02\x03\x04\x05" * 100
-
-
-@pytest.fixture
-def sample_transcription_text():
- """Sample transcription text."""
- return "Hello, this is a test transcription."
-
-
-@pytest.fixture
-def sample_metadata():
- """Sample metadata for various events."""
- return {
- "request_id": "test-request-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1
- }
-
-
-@pytest.fixture
-def sample_function_call():
- """Sample function call data for Agent testing."""
- return {
- "id": "func-123",
- "name": "get_weather",
- "arguments": '{"location": "New York"}',
- "client_side": False
- }
-
-
-@pytest.fixture
-def valid_model_data():
- """Factory for creating valid model test data."""
- def _create_data(model_type: str, **overrides) -> Dict[str, Any]:
- """Create valid data for different model types."""
- base_data = {
- "listen_v1_metadata": {
- "type": "Metadata",
- "request_id": "test-123",
- "sha256": "abc123",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.0,
- "channels": 1
- },
- "listen_v1_results": {
- "type": "Results",
- "channel_index": [0],
- "duration": 1.0,
- "start": 0.0,
- "is_final": True,
- "channel": {
- "alternatives": [
- {
- "transcript": "Hello world",
- "confidence": 0.95,
- "words": []
- }
- ]
- },
- "metadata": {
- "request_id": "test-123",
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- },
- "model_uuid": "model-uuid-123"
- }
- },
- "speak_v1_metadata": {
- "type": "Metadata",
- "request_id": "speak-123",
- "model_name": "aura-asteria-en",
- "model_version": "1.0",
- "model_uuid": "uuid-123"
- },
- "agent_v1_welcome": {
- "type": "Welcome",
- "request_id": "req-123"
- },
- "agent_v1_conversation_text": {
- "type": "ConversationText",
- "role": "assistant",
- "content": "Hello!"
- },
- "agent_v1_function_call_request": {
- "type": "FunctionCallRequest",
- "functions": [
- {
- "id": "func-123",
- "name": "get_weather",
- "arguments": "{}",
- "client_side": False
- }
- ]
- }
- }
-
- data = base_data.get(model_type, {})
- data.update(overrides)
- return data
-
- return _create_data
-
-
-@pytest.fixture
-def invalid_model_data():
- """Factory for creating invalid model test data."""
- def _create_invalid_data(model_type: str, field_to_break: str) -> Dict[str, Any]:
- """Create invalid data by removing or corrupting specific fields."""
- valid_data = {
- "listen_v1_metadata": {
- "type": "Metadata",
- "request_id": "test-123",
- "sha256": "abc123",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.0,
- "channels": 1
- }
- }
-
- data = valid_data.get(model_type, {}).copy()
-
- # Remove or corrupt the specified field
- if field_to_break in data:
- if field_to_break == "type":
- data[field_to_break] = "InvalidType"
- elif field_to_break in ["duration", "channels"]:
- data[field_to_break] = "not_a_number"
- else:
- del data[field_to_break]
-
- return data
-
- return _create_invalid_data
diff --git a/tests/unit/test_agent_v1_models.py b/tests/unit/test_agent_v1_models.py
deleted file mode 100644
index 97dd6526..00000000
--- a/tests/unit/test_agent_v1_models.py
+++ /dev/null
@@ -1,661 +0,0 @@
-"""
-Unit tests for Agent V1 socket event models.
-"""
-import pytest
-from pydantic import ValidationError
-
-from deepgram.extensions.types.sockets.agent_v1_agent_started_speaking_event import AgentV1AgentStartedSpeakingEvent
-from deepgram.extensions.types.sockets.agent_v1_agent_thinking_event import AgentV1AgentThinkingEvent
-from deepgram.extensions.types.sockets.agent_v1_control_message import AgentV1ControlMessage
-from deepgram.extensions.types.sockets.agent_v1_conversation_text_event import AgentV1ConversationTextEvent
-from deepgram.extensions.types.sockets.agent_v1_error_event import AgentV1ErrorEvent
-from deepgram.extensions.types.sockets.agent_v1_function_call_request_event import AgentV1FunctionCallRequestEvent
-from deepgram.extensions.types.sockets.agent_v1_function_call_response_message import AgentV1FunctionCallResponseMessage
-from deepgram.extensions.types.sockets.agent_v1_warning_event import AgentV1WarningEvent
-from deepgram.extensions.types.sockets.agent_v1_welcome_message import AgentV1WelcomeMessage
-
-
-class TestAgentV1WelcomeMessage:
- """Test AgentV1WelcomeMessage model."""
-
- def test_valid_welcome_message(self, valid_model_data):
- """Test creating a valid welcome message."""
- data = valid_model_data("agent_v1_welcome")
- message = AgentV1WelcomeMessage(**data)
-
- assert message.type == "Welcome"
- assert message.request_id == "req-123"
-
- def test_welcome_message_serialization(self, valid_model_data):
- """Test welcome message serialization."""
- data = valid_model_data("agent_v1_welcome")
- message = AgentV1WelcomeMessage(**data)
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "Welcome"
- assert message_dict["request_id"] == "req-123"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"Welcome"' in json_str
- assert '"request_id":"req-123"' in json_str
-
- def test_welcome_message_missing_required_fields(self):
- """Test welcome message with missing required fields."""
- # Missing request_id
- with pytest.raises(ValidationError) as exc_info:
- AgentV1WelcomeMessage(
- type="Welcome"
- )
- assert "request_id" in str(exc_info.value)
-
- def test_welcome_message_wrong_type(self):
- """Test welcome message with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- AgentV1WelcomeMessage(
- type="ConversationText", # Wrong type
- request_id="req-123"
- )
- assert "Input should be 'Welcome'" in str(exc_info.value)
-
-
-class TestAgentV1ConversationTextEvent:
- """Test AgentV1ConversationTextEvent model."""
-
- def test_valid_conversation_text_event(self, valid_model_data):
- """Test creating a valid conversation text event."""
- data = valid_model_data("agent_v1_conversation_text")
- event = AgentV1ConversationTextEvent(**data)
-
- assert event.type == "ConversationText"
- assert event.role == "assistant"
- assert event.content == "Hello!"
-
- def test_conversation_text_event_serialization(self, valid_model_data):
- """Test conversation text event serialization."""
- data = valid_model_data("agent_v1_conversation_text")
- event = AgentV1ConversationTextEvent(**data)
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "ConversationText"
- assert event_dict["role"] == "assistant"
- assert event_dict["content"] == "Hello!"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"ConversationText"' in json_str
- assert '"role":"assistant"' in json_str
-
- def test_conversation_text_event_missing_required_fields(self):
- """Test conversation text event with missing required fields."""
- # Missing role
- with pytest.raises(ValidationError) as exc_info:
- AgentV1ConversationTextEvent(
- type="ConversationText",
- content="Hello!"
- )
- assert "role" in str(exc_info.value)
-
- # Missing content
- with pytest.raises(ValidationError) as exc_info:
- AgentV1ConversationTextEvent(
- type="ConversationText",
- role="assistant"
- )
- assert "content" in str(exc_info.value)
-
- def test_conversation_text_event_valid_roles(self):
- """Test conversation text event with valid roles."""
- valid_roles = ["user", "assistant"]
-
- for role in valid_roles:
- event = AgentV1ConversationTextEvent(
- type="ConversationText",
- role=role,
- content="Test content"
- )
- assert event.role == role
-
- def test_conversation_text_event_invalid_role(self):
- """Test conversation text event with invalid role."""
- with pytest.raises(ValidationError) as exc_info:
- AgentV1ConversationTextEvent(
- type="ConversationText",
- role="system", # Invalid role
- content="Hello!"
- )
- assert "Input should be 'user' or 'assistant'" in str(exc_info.value)
-
- def test_conversation_text_event_empty_content(self):
- """Test conversation text event with empty content."""
- event = AgentV1ConversationTextEvent(
- type="ConversationText",
- role="assistant",
- content=""
- )
-
- assert event.content == ""
-
- def test_conversation_text_event_long_content(self):
- """Test conversation text event with very long content."""
- long_content = "This is a very long message. " * 1000 # ~30KB
- event = AgentV1ConversationTextEvent(
- type="ConversationText",
- role="assistant",
- content=long_content
- )
-
- assert len(event.content) > 20000
-
-
-class TestAgentV1FunctionCallRequestEvent:
- """Test AgentV1FunctionCallRequestEvent model."""
-
- def test_valid_function_call_request_event(self, valid_model_data):
- """Test creating a valid function call request event."""
- data = valid_model_data("agent_v1_function_call_request")
- event = AgentV1FunctionCallRequestEvent(**data)
-
- assert event.type == "FunctionCallRequest"
- assert len(event.functions) == 1
- assert event.functions[0].id == "func-123"
- assert event.functions[0].name == "get_weather"
- assert event.functions[0].arguments == "{}"
- assert event.functions[0].client_side is False
-
- def test_function_call_request_event_serialization(self, valid_model_data):
- """Test function call request event serialization."""
- data = valid_model_data("agent_v1_function_call_request")
- event = AgentV1FunctionCallRequestEvent(**data)
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "FunctionCallRequest"
- assert len(event_dict["functions"]) == 1
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"FunctionCallRequest"' in json_str
- assert '"name":"get_weather"' in json_str
-
- def test_function_call_request_event_missing_required_fields(self):
- """Test function call request event with missing required fields."""
- # Missing functions
- with pytest.raises(ValidationError) as exc_info:
- AgentV1FunctionCallRequestEvent(
- type="FunctionCallRequest"
- )
- assert "functions" in str(exc_info.value)
-
- def test_function_call_request_event_empty_functions(self):
- """Test function call request event with empty functions list."""
- event = AgentV1FunctionCallRequestEvent(
- type="FunctionCallRequest",
- functions=[]
- )
-
- assert event.type == "FunctionCallRequest"
- assert len(event.functions) == 0
-
- def test_function_call_request_event_multiple_functions(self, sample_function_call):
- """Test function call request event with multiple functions."""
- functions = [
- sample_function_call,
- {
- "id": "func-456",
- "name": "get_time",
- "arguments": '{"timezone": "UTC"}',
- "client_side": True
- }
- ]
-
- event = AgentV1FunctionCallRequestEvent(
- type="FunctionCallRequest",
- functions=functions
- )
-
- assert len(event.functions) == 2
- assert event.functions[0].name == "get_weather"
- assert event.functions[1].name == "get_time"
- assert event.functions[1].client_side is True
-
- def test_function_call_request_event_invalid_function_structure(self):
- """Test function call request event with invalid function structure."""
- # Missing required function fields
- with pytest.raises(ValidationError) as exc_info:
- AgentV1FunctionCallRequestEvent(
- type="FunctionCallRequest",
- functions=[{
- "id": "func-123",
- "name": "get_weather"
- # Missing arguments and client_side
- }]
- )
- # The validation error should mention missing fields
- error_str = str(exc_info.value)
- assert "arguments" in error_str or "client_side" in error_str
-
-
-class TestAgentV1FunctionCallResponseMessage:
- """Test AgentV1FunctionCallResponseMessage model."""
-
- def test_valid_function_call_response_message(self):
- """Test creating a valid function call response message."""
- message = AgentV1FunctionCallResponseMessage(
- type="FunctionCallResponse",
- name="get_weather",
- content='{"temperature": 25, "condition": "sunny"}'
- )
-
- assert message.type == "FunctionCallResponse"
- assert message.name == "get_weather"
- assert message.content == '{"temperature": 25, "condition": "sunny"}'
-
- def test_function_call_response_message_serialization(self):
- """Test function call response message serialization."""
- message = AgentV1FunctionCallResponseMessage(
- type="FunctionCallResponse",
- name="get_weather",
- content='{"temperature": 25, "condition": "sunny"}'
- )
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "FunctionCallResponse"
- assert message_dict["name"] == "get_weather"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"FunctionCallResponse"' in json_str
- assert '"name":"get_weather"' in json_str
-
- def test_function_call_response_message_missing_required_fields(self):
- """Test function call response message with missing required fields."""
- # Missing name
- with pytest.raises(ValidationError) as exc_info:
- AgentV1FunctionCallResponseMessage(
- type="FunctionCallResponse",
- content='{"temperature": 25}'
- )
- assert "name" in str(exc_info.value)
-
- # Missing content
- with pytest.raises(ValidationError) as exc_info:
- AgentV1FunctionCallResponseMessage(
- type="FunctionCallResponse",
- name="get_weather"
- )
- assert "content" in str(exc_info.value)
-
-
-class TestAgentV1AgentThinkingEvent:
- """Test AgentV1AgentThinkingEvent model."""
-
- def test_valid_agent_thinking_event(self):
- """Test creating a valid agent thinking event."""
- event = AgentV1AgentThinkingEvent(
- type="AgentThinking",
- content="I'm thinking about your request..."
- )
-
- assert event.type == "AgentThinking"
- assert event.content == "I'm thinking about your request..."
-
- def test_agent_thinking_event_serialization(self):
- """Test agent thinking event serialization."""
- event = AgentV1AgentThinkingEvent(
- type="AgentThinking",
- content="Processing your request..."
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "AgentThinking"
- assert event_dict["content"] == "Processing your request..."
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"AgentThinking"' in json_str
- assert '"content":"Processing your request..."' in json_str
-
- def test_agent_thinking_event_wrong_type(self):
- """Test agent thinking event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- AgentV1AgentThinkingEvent(
- type="UserStartedSpeaking", # Wrong type
- content="Test content"
- )
- assert "Input should be 'AgentThinking'" in str(exc_info.value)
-
-
-class TestAgentV1AgentStartedSpeakingEvent:
- """Test AgentV1AgentStartedSpeakingEvent model."""
-
- def test_valid_agent_started_speaking_event(self):
- """Test creating a valid agent started speaking event."""
- event = AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- total_latency=150.5,
- tts_latency=50.2,
- ttt_latency=100.3
- )
-
- assert event.type == "AgentStartedSpeaking"
- assert event.total_latency == 150.5
- assert event.tts_latency == 50.2
- assert event.ttt_latency == 100.3
-
- def test_agent_started_speaking_event_serialization(self):
- """Test agent started speaking event serialization."""
- event = AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- total_latency=150.5,
- tts_latency=50.2,
- ttt_latency=100.3
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "AgentStartedSpeaking"
- assert event_dict["total_latency"] == 150.5
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"AgentStartedSpeaking"' in json_str
- assert '"total_latency":150.5' in json_str
-
- def test_agent_started_speaking_event_missing_required_fields(self):
- """Test agent started speaking event with missing required fields."""
- # Missing total_latency
- with pytest.raises(ValidationError) as exc_info:
- AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- tts_latency=50.2,
- ttt_latency=100.3
- )
- assert "total_latency" in str(exc_info.value)
-
- def test_agent_started_speaking_event_invalid_data_types(self):
- """Test agent started speaking event with invalid data types."""
- # Invalid total_latency type
- with pytest.raises(ValidationError) as exc_info:
- AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- total_latency="not_a_number",
- tts_latency=50.2,
- ttt_latency=100.3
- )
- assert "Input should be a valid number" in str(exc_info.value)
-
-
-class TestAgentV1ErrorEvent:
- """Test AgentV1ErrorEvent model."""
-
- def test_valid_error_event(self):
- """Test creating a valid error event."""
- event = AgentV1ErrorEvent(
- type="Error",
- description="Function call failed",
- code="FUNCTION_CALL_ERROR"
- )
-
- assert event.type == "Error"
- assert event.description == "Function call failed"
- assert event.code == "FUNCTION_CALL_ERROR"
-
- def test_error_event_serialization(self):
- """Test error event serialization."""
- event = AgentV1ErrorEvent(
- type="Error",
- description="Function call failed",
- code="FUNCTION_CALL_ERROR"
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Error"
- assert event_dict["description"] == "Function call failed"
- assert event_dict["code"] == "FUNCTION_CALL_ERROR"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Error"' in json_str
- assert '"description":"Function call failed"' in json_str
-
- def test_error_event_missing_required_fields(self):
- """Test error event with missing required fields."""
- # Missing description
- with pytest.raises(ValidationError) as exc_info:
- AgentV1ErrorEvent(
- type="Error",
- code="FUNCTION_CALL_ERROR"
- )
- assert "description" in str(exc_info.value)
-
- # Missing code
- with pytest.raises(ValidationError) as exc_info:
- AgentV1ErrorEvent(
- type="Error",
- description="Function call failed"
- )
- assert "code" in str(exc_info.value)
-
-
-class TestAgentV1WarningEvent:
- """Test AgentV1WarningEvent model."""
-
- def test_valid_warning_event(self):
- """Test creating a valid warning event."""
- event = AgentV1WarningEvent(
- type="Warning",
- description="Connection quality degraded",
- code="CONNECTION_WARNING"
- )
-
- assert event.type == "Warning"
- assert event.description == "Connection quality degraded"
- assert event.code == "CONNECTION_WARNING"
-
- def test_warning_event_serialization(self):
- """Test warning event serialization."""
- event = AgentV1WarningEvent(
- type="Warning",
- description="Connection quality degraded",
- code="CONNECTION_WARNING"
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Warning"
- assert event_dict["description"] == "Connection quality degraded"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Warning"' in json_str
-
-
-class TestAgentV1ControlMessage:
- """Test AgentV1ControlMessage model."""
-
- def test_valid_control_message(self):
- """Test creating a valid control message."""
- message = AgentV1ControlMessage(
- type="KeepAlive"
- )
-
- assert message.type == "KeepAlive"
-
- def test_control_message_serialization(self):
- """Test control message serialization."""
- message = AgentV1ControlMessage(type="KeepAlive")
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "KeepAlive"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"KeepAlive"' in json_str
-
-
-class TestAgentV1MediaMessage:
- """Test AgentV1MediaMessage model."""
-
- def test_valid_media_message(self, sample_audio_data):
- """Test creating a valid media message."""
- # AgentV1MediaMessage is typically just bytes
- assert isinstance(sample_audio_data, bytes)
- assert len(sample_audio_data) > 0
-
- def test_empty_media_message(self):
- """Test empty media message."""
- empty_data = b""
- assert isinstance(empty_data, bytes)
- assert len(empty_data) == 0
-
-
-class TestAgentV1ModelIntegration:
- """Integration tests for Agent V1 models."""
-
- def test_model_roundtrip_serialization(self, valid_model_data):
- """Test that models can be serialized and deserialized."""
- # Test conversation text event roundtrip
- conversation_data = valid_model_data("agent_v1_conversation_text")
- original_event = AgentV1ConversationTextEvent(**conversation_data)
-
- # Serialize to JSON and back
- json_str = original_event.model_dump_json()
- import json
- parsed_data = json.loads(json_str)
- reconstructed_event = AgentV1ConversationTextEvent(**parsed_data)
-
- assert original_event.type == reconstructed_event.type
- assert original_event.role == reconstructed_event.role
- assert original_event.content == reconstructed_event.content
-
- def test_comprehensive_function_call_scenarios(self):
- """Test comprehensive function call scenarios."""
- # Test various function call types
- function_scenarios = [
- {
- "id": "weather-1",
- "name": "get_weather",
- "arguments": '{"location": "New York", "units": "metric"}',
- "client_side": False
- },
- {
- "id": "time-1",
- "name": "get_current_time",
- "arguments": '{"timezone": "America/New_York"}',
- "client_side": True
- },
- {
- "id": "calc-1",
- "name": "calculate",
- "arguments": '{"expression": "2 + 2"}',
- "client_side": False
- }
- ]
-
- for scenario in function_scenarios:
- event = AgentV1FunctionCallRequestEvent(
- type="FunctionCallRequest",
- functions=[scenario]
- )
- assert len(event.functions) == 1
- assert event.functions[0].name == scenario["name"]
- assert event.functions[0].client_side == scenario["client_side"]
-
- def test_latency_measurements_edge_cases(self):
- """Test latency measurements with edge cases."""
- # Test with zero latencies
- event = AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- total_latency=0.0,
- tts_latency=0.0,
- ttt_latency=0.0
- )
- assert event.total_latency == 0.0
-
- # Test with very high latencies
- event = AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- total_latency=99999.999,
- tts_latency=50000.0,
- ttt_latency=49999.999
- )
- assert event.total_latency == 99999.999
-
- # Test with fractional latencies
- event = AgentV1AgentStartedSpeakingEvent(
- type="AgentStartedSpeaking",
- total_latency=123.456789,
- tts_latency=45.123456,
- ttt_latency=78.333333
- )
- assert event.total_latency == 123.456789
-
- def test_error_and_warning_comprehensive(self):
- """Test comprehensive error and warning scenarios."""
- # Test common error scenarios
- error_scenarios = [
- {
- "description": "Function 'get_weather' not found",
- "code": "FUNCTION_NOT_FOUND"
- },
- {
- "description": "Invalid function arguments provided",
- "code": "INVALID_ARGUMENTS"
- },
- {
- "description": "Function execution timeout",
- "code": "FUNCTION_TIMEOUT"
- },
- {
- "description": "Rate limit exceeded for function calls",
- "code": "RATE_LIMIT_EXCEEDED"
- }
- ]
-
- for scenario in error_scenarios:
- event = AgentV1ErrorEvent(
- type="Error",
- description=scenario["description"],
- code=scenario["code"]
- )
- assert event.description == scenario["description"]
- assert event.code == scenario["code"]
-
- # Test common warning scenarios
- warning_scenarios = [
- {
- "description": "Function call taking longer than expected",
- "code": "FUNCTION_SLOW_WARNING"
- },
- {
- "description": "Connection quality may affect performance",
- "code": "CONNECTION_QUALITY_WARNING"
- }
- ]
-
- for scenario in warning_scenarios:
- event = AgentV1WarningEvent(
- type="Warning",
- description=scenario["description"],
- code=scenario["code"]
- )
- assert event.description == scenario["description"]
- assert event.code == scenario["code"]
-
- def test_model_immutability(self, valid_model_data):
- """Test that models are properly validated on construction."""
- data = valid_model_data("agent_v1_conversation_text")
- event = AgentV1ConversationTextEvent(**data)
-
- # Models should be immutable by default in Pydantic v2
- # Test that we can access all fields
- assert event.type == "ConversationText"
- assert event.role is not None
- assert event.content is not None
diff --git a/tests/unit/test_api_response_models.py b/tests/unit/test_api_response_models.py
deleted file mode 100644
index 7479e2fa..00000000
--- a/tests/unit/test_api_response_models.py
+++ /dev/null
@@ -1,626 +0,0 @@
-"""
-Unit tests for core API response models.
-"""
-import pytest
-from pydantic import ValidationError
-
-# Import the core API response models
-from deepgram.types.listen_v1response import ListenV1Response
-from deepgram.types.read_v1response import ReadV1Response
-from deepgram.types.speak_v1response import SpeakV1Response
-from deepgram.types.error_response_modern_error import ErrorResponseModernError
-from deepgram.types.error_response_legacy_error import ErrorResponseLegacyError
-
-
-class TestListenV1Response:
- """Test ListenV1Response model."""
-
- def test_valid_listen_response(self):
- """Test creating a valid listen response."""
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- },
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": "Hello world",
- "confidence": 0.95,
- "words": [
- {
- "word": "Hello",
- "start": 0.0,
- "end": 0.5,
- "confidence": 0.95
- },
- {
- "word": "world",
- "start": 0.6,
- "end": 1.0,
- "confidence": 0.95
- }
- ]
- }
- ]
- }
- ]
- }
- }
-
- response = ListenV1Response(**response_data)
-
- assert response.metadata is not None
- assert response.results is not None
- assert response.metadata.request_id == "req-123"
- assert response.metadata.duration == 1.5
- assert response.metadata.channels == 1
-
- def test_listen_response_serialization(self):
- """Test listen response serialization."""
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": "Hello world",
- "confidence": 0.95,
- "words": []
- }
- ]
- }
- ]
- }
- }
-
- response = ListenV1Response(**response_data)
-
- # Test dict conversion
- response_dict = response.model_dump()
- assert "metadata" in response_dict
- assert "results" in response_dict
- assert response_dict["metadata"]["request_id"] == "req-123"
-
- # Test JSON serialization
- json_str = response.model_dump_json()
- assert '"request_id":"req-123"' in json_str
- assert '"transcript":"Hello world"' in json_str
-
- def test_listen_response_missing_required_fields(self):
- """Test listen response with missing required fields."""
- # Missing metadata
- with pytest.raises(ValidationError) as exc_info:
- ListenV1Response(
- results={
- "channels": []
- }
- )
- assert "metadata" in str(exc_info.value)
-
- # Missing results
- with pytest.raises(ValidationError) as exc_info:
- ListenV1Response(
- metadata={
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1,
- "models": []
- }
- )
- assert "results" in str(exc_info.value)
-
- def test_listen_response_empty_channels(self):
- """Test listen response with empty channels."""
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 0,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": []
- }
- }
-
- response = ListenV1Response(**response_data)
- assert len(response.results.channels) == 0
- assert response.metadata.channels == 0
-
- def test_listen_response_multiple_alternatives(self):
- """Test listen response with multiple alternatives."""
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": "Hello world",
- "confidence": 0.95,
- "words": []
- },
- {
- "transcript": "Hello word",
- "confidence": 0.85,
- "words": []
- }
- ]
- }
- ]
- }
- }
-
- response = ListenV1Response(**response_data)
- assert len(response.results.channels) == 1
- assert len(response.results.channels[0].alternatives) == 2
- assert response.results.channels[0].alternatives[0].confidence == 0.95
- assert response.results.channels[0].alternatives[1].confidence == 0.85
-
-
-class TestReadV1Response:
- """Test ReadV1Response model."""
-
- def test_valid_read_response(self):
- """Test creating a valid read response."""
- response_data = {
- "metadata": {
- "request_id": "read-123",
- "created": "2023-01-01T00:00:00Z",
- "language": "en",
- "model": "nova-2-general",
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "summary": {
- "text": "This is a summary of the analyzed text.",
- "start_word": 0,
- "end_word": 10
- }
- }
- }
-
- response = ReadV1Response(**response_data)
-
- assert response.metadata is not None
- assert response.results is not None
- assert response.metadata.request_id == "read-123"
- assert response.metadata.language == "en"
- assert response.results.summary.text == "This is a summary of the analyzed text."
-
- def test_read_response_serialization(self):
- """Test read response serialization."""
- response_data = {
- "metadata": {
- "request_id": "read-123",
- "created": "2023-01-01T00:00:00Z",
- "language": "en",
- "model": "nova-2-general"
- },
- "results": {
- "summary": {
- "text": "Summary text",
- "start_word": 0,
- "end_word": 5
- }
- }
- }
-
- response = ReadV1Response(**response_data)
-
- # Test dict conversion
- response_dict = response.model_dump()
- assert "metadata" in response_dict
- assert "results" in response_dict
- assert response_dict["metadata"]["request_id"] == "read-123"
-
- # Test JSON serialization
- json_str = response.model_dump_json()
- assert '"request_id":"read-123"' in json_str
- assert '"text":"Summary text"' in json_str
-
- def test_read_response_missing_required_fields(self):
- """Test read response with missing required fields."""
- # Missing metadata
- with pytest.raises(ValidationError) as exc_info:
- ReadV1Response(
- results={
- "summary": {
- "text": "Summary",
- "start_word": 0,
- "end_word": 1
- }
- }
- )
- assert "metadata" in str(exc_info.value)
-
- def test_read_response_optional_fields(self):
- """Test read response with optional fields."""
- response_data = {
- "metadata": {
- "request_id": "read-123",
- "created": "2023-01-01T00:00:00Z",
- "language": "en",
- "model": "nova-2-general",
- "intents_info": {
- "model_uuid": "intent-model-123"
- },
- "sentiment_info": {
- "model_uuid": "sentiment-model-123"
- },
- "topics_info": {
- "model_uuid": "topics-model-123"
- },
- "summary_info": {
- "model_uuid": "summary-model-123"
- }
- },
- "results": {
- "summary": {
- "text": "Summary with all optional metadata",
- "start_word": 0,
- "end_word": 5
- }
- }
- }
-
- response = ReadV1Response(**response_data)
- assert response.metadata.intents_info is not None
- assert response.metadata.sentiment_info is not None
- assert response.metadata.topics_info is not None
- assert response.metadata.summary_info is not None
-
-
-class TestSpeakV1Response:
- """Test SpeakV1Response model."""
-
- def test_valid_speak_response(self, sample_audio_data):
- """Test creating a valid speak response."""
- # SpeakV1Response is typically just bytes (audio data)
- assert isinstance(sample_audio_data, bytes)
- assert len(sample_audio_data) > 0
-
- def test_empty_speak_response(self):
- """Test empty speak response."""
- empty_audio = b""
- assert isinstance(empty_audio, bytes)
- assert len(empty_audio) == 0
-
- def test_large_speak_response(self):
- """Test large speak response."""
- large_audio = b"\x00\x01\x02\x03" * 50000 # 200KB
- assert isinstance(large_audio, bytes)
- assert len(large_audio) == 200000
-
- def test_speak_response_audio_formats(self):
- """Test speak response with different audio format headers."""
- # WAV header simulation
- wav_header = b"RIFF\x24\x08\x00\x00WAVEfmt "
- wav_audio = wav_header + b"\x00\x01" * 1000
- assert isinstance(wav_audio, bytes)
- assert wav_audio.startswith(b"RIFF")
-
- # MP3 header simulation
- mp3_header = b"\xff\xfb" # MP3 sync word
- mp3_audio = mp3_header + b"\x00\x01" * 1000
- assert isinstance(mp3_audio, bytes)
- assert mp3_audio.startswith(b"\xff\xfb")
-
-
-class TestErrorResponseModern:
- """Test ErrorResponseModernError model."""
-
- def test_valid_modern_error_response(self):
- """Test creating a valid modern error response."""
- error_data = {
- "message": "Invalid API key",
- "category": "authentication_error"
- }
-
- response = ErrorResponseModernError(**error_data)
- assert response.message == "Invalid API key"
- assert response.category == "authentication_error"
-
-
-class TestErrorResponseLegacy:
- """Test ErrorResponseLegacyError model."""
-
- def test_valid_legacy_error_response(self):
- """Test creating a valid legacy error response."""
- error_data = {
- "err_code": "INVALID_AUTH",
- "err_msg": "Invalid credentials provided"
- }
-
- response = ErrorResponseLegacyError(**error_data)
- assert response.err_code == "INVALID_AUTH"
- assert response.err_msg == "Invalid credentials provided"
-
- def test_error_response_serialization(self):
- """Test error response serialization."""
- error_data = {
- "err_code": "RATE_LIMIT",
- "err_msg": "Rate limit exceeded"
- }
-
- response = ErrorResponseLegacyError(**error_data)
-
- # Test dict conversion
- response_dict = response.model_dump()
- assert response_dict["err_code"] == "RATE_LIMIT"
- assert response_dict["err_msg"] == "Rate limit exceeded"
-
- # Test JSON serialization
- json_str = response.model_dump_json()
- assert '"err_code":"RATE_LIMIT"' in json_str
- assert '"err_msg":"Rate limit exceeded"' in json_str
-
-
-class TestAPIResponseModelIntegration:
- """Integration tests for API response models."""
-
- def test_model_roundtrip_serialization(self):
- """Test that models can be serialized and deserialized."""
- # Test listen response roundtrip
- original_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": "Hello world",
- "confidence": 0.95,
- "words": []
- }
- ]
- }
- ]
- }
- }
-
- original_response = ListenV1Response(**original_data)
-
- # Serialize to JSON and back
- json_str = original_response.model_dump_json()
- import json
- parsed_data = json.loads(json_str)
- reconstructed_response = ListenV1Response(**parsed_data)
-
- assert original_response.metadata.request_id == reconstructed_response.metadata.request_id
- assert original_response.metadata.duration == reconstructed_response.metadata.duration
- assert len(original_response.results.channels) == len(reconstructed_response.results.channels)
-
- def test_model_validation_edge_cases(self):
- """Test edge cases in model validation."""
- # Test with very long transcript
- long_transcript = "word " * 10000 # ~50KB
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1000.0,
- "channels": 1,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": long_transcript,
- "confidence": 0.95,
- "words": []
- }
- ]
- }
- ]
- }
- }
-
- response = ListenV1Response(**response_data)
- assert len(response.results.channels[0].alternatives[0].transcript) > 40000
-
- def test_model_with_extreme_numeric_values(self):
- """Test models with extreme numeric values."""
- # Test with very high confidence and long duration
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 99999.999999,
- "channels": 1000,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": "Test",
- "confidence": 1.0,
- "words": []
- }
- ]
- }
- ]
- }
- }
-
- response = ListenV1Response(**response_data)
- assert response.metadata.duration == 99999.999999
- assert response.metadata.channels == 1000
- assert response.results.channels[0].alternatives[0].confidence == 1.0
-
- def test_comprehensive_error_scenarios(self):
- """Test comprehensive error scenarios."""
- # Test various HTTP error codes and messages
- error_scenarios = [
- {
- "message": "Bad Request - Invalid parameters",
- "type": "bad_request_error"
- },
- {
- "message": "Unauthorized - Invalid API key",
- "type": "authentication_error"
- },
- {
- "message": "Forbidden - Insufficient permissions",
- "type": "permission_error"
- },
- {
- "message": "Not Found - Resource does not exist",
- "type": "not_found_error"
- },
- {
- "message": "Too Many Requests - Rate limit exceeded",
- "type": "rate_limit_error"
- },
- {
- "message": "Internal Server Error",
- "type": "server_error"
- },
- {
- "message": "Service Unavailable - Try again later",
- "type": "service_unavailable_error"
- }
- ]
-
- for scenario in error_scenarios:
- error_response = ErrorResponseModernError(
- message=scenario["message"],
- category=scenario["type"]
- )
- assert error_response.message == scenario["message"]
- assert error_response.category == scenario["type"]
-
- def test_model_comparison_and_equality(self):
- """Test model equality comparison."""
- response_data = {
- "metadata": {
- "transaction_key": "deprecated",
- "request_id": "req-123",
- "sha256": "abc123def456",
- "created": "2023-01-01T00:00:00Z",
- "duration": 1.5,
- "channels": 1,
- "models": ["nova-2-general"],
- "model_info": {
- "name": "nova-2-general",
- "version": "1.0",
- "arch": "nova"
- }
- },
- "results": {
- "channels": [
- {
- "alternatives": [
- {
- "transcript": "Hello world",
- "confidence": 0.95,
- "words": []
- }
- ]
- }
- ]
- }
- }
-
- response1 = ListenV1Response(**response_data)
- response2 = ListenV1Response(**response_data)
-
- # Same data should be equal
- assert response1 == response2
-
- # Different data should not be equal
- different_data = response_data.copy()
- different_data["metadata"]["request_id"] = "req-456"
- response3 = ListenV1Response(**different_data)
- assert response1 != response3
diff --git a/tests/unit/test_core_file.py b/tests/unit/test_core_file.py
deleted file mode 100644
index 4f7ba397..00000000
--- a/tests/unit/test_core_file.py
+++ /dev/null
@@ -1,279 +0,0 @@
-"""
-Unit tests for core file handling utilities.
-"""
-import io
-import pytest
-
-from deepgram.core.file import (
- convert_file_dict_to_httpx_tuples,
- with_content_type
-)
-
-
-class TestConvertFileDictToHttpxTuples:
- """Test convert_file_dict_to_httpx_tuples function."""
-
- def test_simple_file_dict(self):
- """Test converting a simple file dictionary."""
- file_content = b"test content"
- file_dict = {"audio": file_content}
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- assert result == [("audio", file_content)]
-
- def test_multiple_files(self):
- """Test converting dictionary with multiple files."""
- file_dict = {
- "audio": b"audio content",
- "metadata": "metadata content"
- }
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- expected = [
- ("audio", b"audio content"),
- ("metadata", "metadata content")
- ]
- assert sorted(result) == sorted(expected)
-
- def test_file_list(self):
- """Test converting dictionary with list of files."""
- file_dict = {
- "documents": [
- b"document1 content",
- b"document2 content",
- "document3 content"
- ]
- }
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- expected = [
- ("documents", b"document1 content"),
- ("documents", b"document2 content"),
- ("documents", "document3 content")
- ]
- assert result == expected
-
- def test_mixed_files_and_lists(self):
- """Test converting dictionary with both single files and file lists."""
- file_dict = {
- "single_file": b"single content",
- "multiple_files": [
- b"multi1 content",
- b"multi2 content"
- ]
- }
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- expected = [
- ("single_file", b"single content"),
- ("multiple_files", b"multi1 content"),
- ("multiple_files", b"multi2 content")
- ]
- assert sorted(result) == sorted(expected)
-
- def test_tuple_file_format(self):
- """Test converting files in tuple format."""
- file_dict = {
- "file_with_name": ("test.txt", b"content"),
- "file_with_content_type": ("test.json", b'{"key": "value"}', "application/json")
- }
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- expected = [
- ("file_with_name", ("test.txt", b"content")),
- ("file_with_content_type", ("test.json", b'{"key": "value"}', "application/json"))
- ]
- assert sorted(result) == sorted(expected)
-
- def test_io_objects(self):
- """Test converting with IO objects."""
- file_content = io.BytesIO(b"io content")
- file_dict = {"io_file": file_content}
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- assert result == [("io_file", file_content)]
-
- def test_empty_dict(self):
- """Test converting empty dictionary."""
- result = convert_file_dict_to_httpx_tuples({})
- assert result == []
-
- def test_empty_list_value(self):
- """Test converting dictionary with empty list value."""
- file_dict = {"empty_files": []}
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- assert result == []
-
-
-class TestWithContentType:
- """Test with_content_type function."""
-
- def test_simple_file_content(self):
- """Test adding content type to simple file content."""
- file_content = b"test content"
-
- result = with_content_type(file=file_content, default_content_type="application/octet-stream")
-
- expected = (None, file_content, "application/octet-stream")
- assert result == expected
-
- def test_string_file_content(self):
- """Test adding content type to string file content."""
- file_content = "test content"
-
- result = with_content_type(file=file_content, default_content_type="text/plain")
-
- expected = (None, file_content, "text/plain")
- assert result == expected
-
- def test_io_file_content(self):
- """Test adding content type to IO file content."""
- file_content = io.BytesIO(b"io content")
-
- result = with_content_type(file=file_content, default_content_type="application/octet-stream")
-
- expected = (None, file_content, "application/octet-stream")
- assert result == expected
-
- def test_two_element_tuple(self):
- """Test adding content type to (filename, content) tuple."""
- file_tuple = ("test.txt", b"file content")
-
- result = with_content_type(file=file_tuple, default_content_type="text/plain")
-
- expected = ("test.txt", b"file content", "text/plain")
- assert result == expected
-
- def test_three_element_tuple_with_content_type(self):
- """Test handling (filename, content, content_type) tuple."""
- file_tuple = ("test.json", b'{"key": "value"}', "application/json")
-
- result = with_content_type(file=file_tuple, default_content_type="text/plain")
-
- # Should keep the existing content type
- expected = ("test.json", b'{"key": "value"}', "application/json")
- assert result == expected
-
- def test_three_element_tuple_with_none_content_type(self):
- """Test handling tuple with None content type."""
- file_tuple = ("test.txt", b"content", None)
-
- result = with_content_type(file=file_tuple, default_content_type="text/plain")
-
- # Should use the default content type
- expected = ("test.txt", b"content", "text/plain")
- assert result == expected
-
- def test_four_element_tuple_with_headers(self):
- """Test handling (filename, content, content_type, headers) tuple."""
- headers = {"X-Custom": "value"}
- file_tuple = ("test.txt", b"content", "text/plain", headers)
-
- result = with_content_type(file=file_tuple, default_content_type="application/octet-stream")
-
- # Should keep the existing content type and headers
- expected = ("test.txt", b"content", "text/plain", headers)
- assert result == expected
-
- def test_four_element_tuple_with_none_content_type(self):
- """Test handling tuple with None content type and headers."""
- headers = {"X-Custom": "value"}
- file_tuple = ("test.txt", b"content", None, headers)
-
- result = with_content_type(file=file_tuple, default_content_type="application/json")
-
- # Should use default content type but keep headers
- expected = ("test.txt", b"content", "application/json", headers)
- assert result == expected
-
- def test_invalid_tuple_length(self):
- """Test handling tuple with invalid length."""
- invalid_tuple = ("a", "b", "c", "d", "e") # 5 elements
-
- with pytest.raises(ValueError, match="Unexpected tuple length: 5"):
- with_content_type(file=invalid_tuple, default_content_type="text/plain")
-
- def test_single_element_tuple(self):
- """Test handling single element tuple."""
- invalid_tuple = ("only_one",) # 1 element
-
- with pytest.raises(ValueError, match="Unexpected tuple length: 1"):
- with_content_type(file=invalid_tuple, default_content_type="text/plain")
-
-
-class TestFileTyping:
- """Test file type definitions and edge cases."""
-
- def test_various_file_content_types(self):
- """Test that various FileContent types work correctly."""
- # Test bytes
- bytes_content = b"bytes content"
- result = with_content_type(file=bytes_content, default_content_type="application/octet-stream")
- assert result[1] == bytes_content
-
- # Test string
- string_content = "string content"
- result = with_content_type(file=string_content, default_content_type="text/plain")
- assert result[1] == string_content
-
- # Test IO
- io_content = io.BytesIO(b"io content")
- result = with_content_type(file=io_content, default_content_type="application/octet-stream")
- assert result[1] == io_content
-
- def test_file_dict_with_various_types(self):
- """Test file dict conversion with various file types."""
- string_io = io.StringIO("string io content")
- bytes_io = io.BytesIO(b"bytes io content")
-
- file_dict = {
- "bytes": b"bytes content",
- "string": "string content",
- "string_io": string_io,
- "bytes_io": bytes_io,
- "tuple_basic": ("file.txt", b"content"),
- "tuple_with_type": ("file.json", b'{}', "application/json"),
- "tuple_with_headers": ("file.xml", b"", "application/xml", {"X-Custom": "header"})
- }
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- # Should have 7 tuples
- assert len(result) == 7
-
- # Check that all keys are preserved
- keys = [item[0] for item in result]
- expected_keys = ["bytes", "string", "string_io", "bytes_io", "tuple_basic", "tuple_with_type", "tuple_with_headers"]
- assert sorted(keys) == sorted(expected_keys)
-
- def test_complex_file_combinations(self):
- """Test complex combinations of file types and lists."""
- file_dict = {
- "mixed_list": [
- b"raw bytes",
- ("named.txt", "string content"),
- ("typed.json", b'{"test": true}', "application/json"),
- io.BytesIO(b"io stream")
- ],
- "single_complex": ("complex.xml", io.StringIO("content"), "application/xml", {"Encoding": "utf-8"})
- }
-
- result = convert_file_dict_to_httpx_tuples(file_dict)
-
- # Should have 5 total items (4 from list + 1 single)
- assert len(result) == 5
-
- # All should have "mixed_list" or "single_complex" as key
- mixed_items = [item for item in result if item[0] == "mixed_list"]
- single_items = [item for item in result if item[0] == "single_complex"]
-
- assert len(mixed_items) == 4
- assert len(single_items) == 1
diff --git a/tests/unit/test_core_jsonable_encoder.py b/tests/unit/test_core_jsonable_encoder.py
deleted file mode 100644
index 4ec341e5..00000000
--- a/tests/unit/test_core_jsonable_encoder.py
+++ /dev/null
@@ -1,372 +0,0 @@
-"""
-Unit tests for core JSON encoder functionality.
-"""
-import pytest
-import datetime as dt
-import base64
-import dataclasses
-from enum import Enum
-from pathlib import Path, PurePath
-from typing import Dict, List, Any, Optional, Set
-from unittest.mock import Mock, patch
-import io
-
-from pydantic import BaseModel
-from deepgram.core.jsonable_encoder import jsonable_encoder
-
-
-# Test models and enums
-class JsonTestEnum(str, Enum):
- VALUE_ONE = "value_one"
- VALUE_TWO = "value_two"
-
-
-class SimpleModel(BaseModel):
- name: str
- age: int
- active: bool = True
-
-
-@dataclasses.dataclass
-class JsonTestDataclass:
- name: str
- value: int
- optional: Optional[str] = None
-
-
-class TestJsonableEncoder:
- """Test jsonable_encoder function."""
-
- def test_simple_types(self):
- """Test encoding simple Python types."""
- # Strings
- assert jsonable_encoder("hello") == "hello"
-
- # Numbers
- assert jsonable_encoder(42) == 42
- assert jsonable_encoder(3.14) == 3.14
-
- # Booleans
- assert jsonable_encoder(True) is True
- assert jsonable_encoder(False) is False
-
- # None
- assert jsonable_encoder(None) is None
-
- def test_collections(self):
- """Test encoding collection types."""
- # Lists
- assert jsonable_encoder([1, 2, 3]) == [1, 2, 3]
- assert jsonable_encoder(["a", "b", "c"]) == ["a", "b", "c"]
-
- # Tuples (should become lists)
- assert jsonable_encoder((1, 2, 3)) == [1, 2, 3]
-
- # Sets (should become lists)
- result = jsonable_encoder({1, 2, 3})
- assert isinstance(result, list)
- assert sorted(result) == [1, 2, 3]
-
- # Dictionaries
- test_dict = {"key1": "value1", "key2": 42}
- assert jsonable_encoder(test_dict) == test_dict
-
- def test_datetime_objects(self):
- """Test encoding datetime objects."""
- # datetime
- dt_obj = dt.datetime(2023, 12, 25, 10, 30, 45)
- result = jsonable_encoder(dt_obj)
- assert isinstance(result, str)
- assert "2023-12-25T10:30:45" in result
-
- # date
- date_obj = dt.date(2023, 12, 25)
- result = jsonable_encoder(date_obj)
- assert isinstance(result, str)
- assert "2023-12-25" in result
-
- # time
- time_obj = dt.time(10, 30, 45)
- result = jsonable_encoder(time_obj)
- assert isinstance(result, str)
- assert "10:30:45" in result
-
- # timedelta
- delta_obj = dt.timedelta(days=5, hours=3, minutes=30)
- result = jsonable_encoder(delta_obj)
- # Should be encoded as string in ISO format or total seconds
- assert isinstance(result, (float, str))
-
- def test_enum_encoding(self):
- """Test encoding enum values."""
- assert jsonable_encoder(JsonTestEnum.VALUE_ONE) == "value_one"
- assert jsonable_encoder(JsonTestEnum.VALUE_TWO) == "value_two"
-
- def test_pydantic_model_encoding(self):
- """Test encoding Pydantic models."""
- model = SimpleModel(name="John", age=30)
- result = jsonable_encoder(model)
-
- expected = {"name": "John", "age": 30, "active": True}
- assert result == expected
-
- def test_dataclass_encoding(self):
- """Test encoding dataclass objects."""
- dataclass_obj = JsonTestDataclass(name="Test", value=42, optional="optional_value")
- result = jsonable_encoder(dataclass_obj)
-
- expected = {"name": "Test", "value": 42, "optional": "optional_value"}
- assert result == expected
-
- def test_dataclass_with_none_values(self):
- """Test encoding dataclass with None values."""
- dataclass_obj = JsonTestDataclass(name="Test", value=42) # optional defaults to None
- result = jsonable_encoder(dataclass_obj)
-
- expected = {"name": "Test", "value": 42, "optional": None}
- assert result == expected
-
- def test_path_objects(self):
- """Test encoding Path and PurePath objects."""
- # Path object
- path_obj = Path("/tmp/test.txt")
- result = jsonable_encoder(path_obj)
- assert result == str(path_obj)
-
- # PurePath object
- pure_path_obj = PurePath("/tmp/pure_test.txt")
- result = jsonable_encoder(pure_path_obj)
- assert result == str(pure_path_obj)
-
- def test_bytes_encoding(self):
- """Test encoding bytes objects."""
- bytes_data = b"hello world"
- result = jsonable_encoder(bytes_data)
-
- # Should be base64 encoded
- expected = base64.b64encode(bytes_data).decode()
- assert result == expected
-
- def test_nested_structures(self):
- """Test encoding nested data structures."""
- nested_data = {
- "user": SimpleModel(name="Alice", age=25),
- "timestamps": [
- dt.datetime(2023, 1, 1, 12, 0, 0),
- dt.datetime(2023, 1, 2, 12, 0, 0)
- ],
- "metadata": {
- "enum_value": JsonTestEnum.VALUE_ONE,
- "path": Path("/tmp/file.txt"),
- "data": JsonTestDataclass(name="nested", value=100)
- }
- }
-
- result = jsonable_encoder(nested_data)
-
- # Check structure is preserved
- assert "user" in result
- assert "timestamps" in result
- assert "metadata" in result
-
- # Check user model is encoded
- assert result["user"]["name"] == "Alice"
- assert result["user"]["age"] == 25
-
- # Check timestamps are encoded as strings
- assert all(isinstance(ts, str) for ts in result["timestamps"])
-
- # Check nested metadata
- assert result["metadata"]["enum_value"] == "value_one"
- assert result["metadata"]["path"] == "/tmp/file.txt"
- assert result["metadata"]["data"]["name"] == "nested"
-
- def test_custom_encoder(self):
- """Test using custom encoder functions."""
- class CustomClass:
- def __init__(self, value):
- self.value = value
-
- def custom_encoder(obj):
- return f"custom_{obj.value}"
-
- custom_obj = CustomClass("test")
- result = jsonable_encoder(custom_obj, custom_encoder={CustomClass: custom_encoder})
-
- assert result == "custom_test"
-
- def test_custom_encoder_inheritance(self):
- """Test custom encoder with inheritance."""
- class BaseClass:
- def __init__(self, value):
- self.value = value
-
- class DerivedClass(BaseClass):
- pass
-
- def base_encoder(obj):
- return f"base_{obj.value}"
-
- derived_obj = DerivedClass("derived")
- result = jsonable_encoder(derived_obj, custom_encoder={BaseClass: base_encoder})
-
- assert result == "base_derived"
-
- def test_generator_encoding(self):
- """Test encoding generator objects."""
- def test_generator():
- yield 1
- yield 2
- yield 3
-
- gen = test_generator()
- result = jsonable_encoder(gen)
-
- # Generator should be converted to list
- assert result == [1, 2, 3]
-
- def test_complex_nested_with_custom_encoders(self):
- """Test complex nested structure with custom encoders."""
- class SpecialValue:
- def __init__(self, data):
- self.data = data
-
- def special_encoder(obj):
- return {"special": obj.data}
-
- complex_data = {
- "models": [
- SimpleModel(name="User1", age=20),
- SimpleModel(name="User2", age=30)
- ],
- "special": SpecialValue("important_data"),
- "mixed_list": [
- JsonTestEnum.VALUE_ONE,
- dt.datetime(2023, 6, 15),
- {"nested": SpecialValue("nested_data")}
- ]
- }
-
- result = jsonable_encoder(complex_data, custom_encoder={SpecialValue: special_encoder})
-
- # Check models are encoded
- assert len(result["models"]) == 2
- assert result["models"][0]["name"] == "User1"
-
- # Check custom encoder is used
- assert result["special"] == {"special": "important_data"}
- assert result["mixed_list"][2]["nested"] == {"special": "nested_data"}
-
- # Check enum and datetime are encoded
- assert result["mixed_list"][0] == "value_one"
- assert isinstance(result["mixed_list"][1], str)
-
- def test_pydantic_model_with_custom_config(self):
- """Test Pydantic model with custom JSON encoders in config."""
- class ModelWithCustomEncoder(BaseModel):
- name: str
- special_field: Any
-
- class Config:
- json_encoders = {
- str: lambda v: v.upper()
- }
-
- model = ModelWithCustomEncoder(name="test", special_field="special")
- result = jsonable_encoder(model)
-
- # The custom encoder from model config should be applied
- # Note: This tests the integration with Pydantic's config
- assert "name" in result
- assert "special_field" in result
-
- def test_edge_cases(self):
- """Test edge cases and unusual inputs."""
- # Empty collections
- assert jsonable_encoder([]) == []
- assert jsonable_encoder({}) == {}
- assert jsonable_encoder(set()) == []
-
- # Nested empty collections
- assert jsonable_encoder({"empty": []}) == {"empty": []}
-
- # Very deep nesting
- deep_dict = {"level": {"level": {"level": "deep_value"}}}
- result = jsonable_encoder(deep_dict)
- assert result["level"]["level"]["level"] == "deep_value"
-
- def test_circular_reference_handling(self):
- """Test that circular references are handled gracefully."""
- # Create a structure that could cause infinite recursion
- data = {"self_ref": None}
- # Don't actually create circular reference as it would cause issues
- # Instead test that normal references work fine
- shared_dict = {"shared": "value"}
- data = {"ref1": shared_dict, "ref2": shared_dict}
-
- result = jsonable_encoder(data)
- assert result["ref1"]["shared"] == "value"
- assert result["ref2"]["shared"] == "value"
-
- def test_io_objects(self):
- """Test encoding IO objects."""
- # StringIO
- string_io = io.StringIO("test content")
- result = jsonable_encoder(string_io)
- # Should be converted to some JSON-serializable form
- assert isinstance(result, (str, dict, list))
-
- # BytesIO
- bytes_io = io.BytesIO(b"test content")
- result = jsonable_encoder(bytes_io)
- # Should be handled appropriately
- assert result is not None
-
-
-class TestJsonableEncoderEdgeCases:
- """Test edge cases and error conditions."""
-
- def test_none_custom_encoder(self):
- """Test that None custom_encoder is handled properly."""
- result = jsonable_encoder("test", custom_encoder=None)
- assert result == "test"
-
- def test_empty_custom_encoder(self):
- """Test that empty custom_encoder dict is handled properly."""
- result = jsonable_encoder("test", custom_encoder={})
- assert result == "test"
-
- def test_unicode_strings(self):
- """Test encoding unicode strings."""
- unicode_data = {
- "chinese": "δ½ ε₯½δΈη",
- "emoji": "πππ«",
- "mixed": "Hello δΈη π",
- "special_chars": "cafΓ© naΓ―ve rΓ©sumΓ©"
- }
-
- result = jsonable_encoder(unicode_data)
- assert result == unicode_data # Should pass through unchanged
-
- def test_very_large_numbers(self):
- """Test encoding very large numbers."""
- large_int = 2**100
- large_float = float('1e308')
-
- assert jsonable_encoder(large_int) == large_int
- assert jsonable_encoder(large_float) == large_float
-
- def test_special_float_values(self):
- """Test encoding special float values."""
- import math
-
- # Note: These might be handled differently by the encoder
- # The exact behavior depends on the implementation
- result_inf = jsonable_encoder(float('inf'))
- result_ninf = jsonable_encoder(float('-inf'))
- result_nan = jsonable_encoder(float('nan'))
-
- # Just ensure they don't crash and return something
- assert result_inf is not None
- assert result_ninf is not None
- assert result_nan is not None
diff --git a/tests/unit/test_core_models.py b/tests/unit/test_core_models.py
deleted file mode 100644
index 2b430999..00000000
--- a/tests/unit/test_core_models.py
+++ /dev/null
@@ -1,430 +0,0 @@
-"""
-Unit tests for core data models and utilities.
-"""
-import pytest
-from pydantic import ValidationError
-
-# Import core utility models
-from deepgram.extensions.telemetry.models import TelemetryEvent, TelemetryContext
-from deepgram.core.api_error import ApiError
-from deepgram.environment import DeepgramClientEnvironment
-
-
-class TestTelemetryModels:
- """Test telemetry-related models."""
-
- def test_valid_telemetry_event(self):
- """Test creating a valid telemetry event."""
- from datetime import datetime
- event = TelemetryEvent(
- name="connection_started",
- time=datetime.now(),
- attributes={"connection_type": "websocket"},
- metrics={}
- )
-
- assert event.name == "connection_started"
- assert event.time is not None
- assert event.attributes["connection_type"] == "websocket"
- assert event.metrics == {}
-
- def test_telemetry_event_serialization(self):
- """Test telemetry event serialization."""
- from datetime import datetime
- event = TelemetryEvent(
- name="audio_sent",
- time=datetime.now(),
- attributes={"bytes_sent": "1024"},
- metrics={"latency": 50.5}
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["name"] == "audio_sent"
- assert event_dict["attributes"]["bytes_sent"] == "1024"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"name":"audio_sent"' in json_str
- assert '"bytes_sent":"1024"' in json_str
-
- def test_telemetry_event_missing_required_fields(self):
- """Test telemetry event with missing required fields."""
- # Missing name
- from datetime import datetime
- with pytest.raises(ValidationError) as exc_info:
- TelemetryEvent(
- time=datetime.now(),
- attributes={},
- metrics={}
- )
- assert "name" in str(exc_info.value)
-
- # Missing time
- from datetime import datetime
- with pytest.raises(ValidationError) as exc_info:
- TelemetryEvent(
- name="connection_started",
- attributes={},
- metrics={}
- )
- assert "time" in str(exc_info.value)
-
- def test_telemetry_event_optional_metadata(self):
- """Test telemetry event with optional metadata."""
- from datetime import datetime
- # Event without attributes/metrics
- event = TelemetryEvent(
- name="connection_closed",
- time=datetime.now(),
- attributes={},
- metrics={}
- )
-
- assert event.attributes == {}
- assert event.metrics == {}
-
- # Event with complex attributes and metrics
- complex_attributes = {
- "connection_type": "websocket",
- "bytes_sent": "1024",
- "bytes_received": "2048",
- "error_count": "0",
- "model_name": "nova-2-general",
- "model_version": "1.0"
- }
-
- complex_metrics = {
- "connection_duration": 30.5,
- "latency": 150.0
- }
-
- event_with_data = TelemetryEvent(
- name="connection_summary",
- time=datetime.now(),
- attributes=complex_attributes,
- metrics=complex_metrics
- )
-
- assert event_with_data.attributes["bytes_sent"] == "1024"
- assert event_with_data.attributes["model_name"] == "nova-2-general"
- assert event_with_data.metrics["connection_duration"] == 30.5
-
- def test_telemetry_context_model(self):
- """Test telemetry context model."""
- context = TelemetryContext(
- session_id="session-123",
- request_id="req-456"
- )
-
- assert context.session_id == "session-123"
- assert context.request_id == "req-456"
-
- def test_telemetry_context_serialization(self):
- """Test telemetry context serialization."""
- context = TelemetryContext(
- session_id="session-123",
- request_id="req-456"
- )
-
- # Test dict conversion
- context_dict = context.model_dump()
- assert context_dict["session_id"] == "session-123"
- assert context_dict["request_id"] == "req-456"
-
- # Test JSON serialization
- json_str = context.model_dump_json()
- assert '"session_id":"session-123"' in json_str
- assert '"request_id":"req-456"' in json_str
-
-
-class TestApiError:
- """Test ApiError model."""
-
- def test_api_error_creation(self):
- """Test creating an API error."""
- error = ApiError(
- status_code=401,
- body="Unauthorized: Invalid API key"
- )
-
- assert error.status_code == 401
- assert error.body == "Unauthorized: Invalid API key"
- assert "401" in str(error)
- assert "Unauthorized" in str(error)
-
- def test_api_error_with_headers(self):
- """Test API error with headers."""
- headers = {
- "Content-Type": "application/json",
- "X-RateLimit-Remaining": "0"
- }
-
- error = ApiError(
- status_code=429,
- body="Rate limit exceeded",
- headers=headers
- )
-
- assert error.status_code == 429
- assert error.headers["Content-Type"] == "application/json"
- assert error.headers["X-RateLimit-Remaining"] == "0"
-
- def test_api_error_common_scenarios(self):
- """Test common API error scenarios."""
- # 400 Bad Request
- bad_request = ApiError(
- status_code=400,
- body="Bad Request: Invalid parameters"
- )
- assert bad_request.status_code == 400
-
- # 401 Unauthorized
- unauthorized = ApiError(
- status_code=401,
- body="Unauthorized: Invalid API key"
- )
- assert unauthorized.status_code == 401
-
- # 403 Forbidden
- forbidden = ApiError(
- status_code=403,
- body="Forbidden: Insufficient permissions"
- )
- assert forbidden.status_code == 403
-
- # 404 Not Found
- not_found = ApiError(
- status_code=404,
- body="Not Found: Resource does not exist"
- )
- assert not_found.status_code == 404
-
- # 429 Too Many Requests
- rate_limit = ApiError(
- status_code=429,
- body="Too Many Requests: Rate limit exceeded"
- )
- assert rate_limit.status_code == 429
-
- # 500 Internal Server Error
- server_error = ApiError(
- status_code=500,
- body="Internal Server Error"
- )
- assert server_error.status_code == 500
-
- def test_api_error_json_body(self):
- """Test API error with JSON body."""
- json_body = '{"error": {"message": "Invalid model", "type": "validation_error"}}'
-
- error = ApiError(
- status_code=400,
- body=json_body
- )
-
- assert error.status_code == 400
- assert "validation_error" in error.body
- assert "Invalid model" in error.body
-
- def test_api_error_empty_body(self):
- """Test API error with empty body."""
- error = ApiError(
- status_code=500,
- body=""
- )
-
- assert error.status_code == 500
- assert error.body == ""
-
-
-class TestDeepgramClientEnvironment:
- """Test DeepgramClientEnvironment enum."""
-
- def test_environment_values(self):
- """Test environment enum values."""
- # Test production environment
- prod_env = DeepgramClientEnvironment.PRODUCTION
- assert prod_env is not None
-
- # Test that we can access the production URL
- assert hasattr(prod_env, 'production') or str(prod_env) == "https://api.deepgram.com"
-
- def test_environment_string_representation(self):
- """Test environment string representation."""
- prod_env = DeepgramClientEnvironment.PRODUCTION
- env_str = str(prod_env)
-
- # Should contain a valid URL
- assert "https://" in env_str or "deepgram" in env_str.lower()
-
- def test_environment_comparison(self):
- """Test environment comparison."""
- env1 = DeepgramClientEnvironment.PRODUCTION
- env2 = DeepgramClientEnvironment.PRODUCTION
-
- # Same environments should be equal
- assert env1 == env2
- assert env1 is env2 # Enum instances should be the same object
-
-
-class TestCoreModelIntegration:
- """Integration tests for core models."""
-
- def test_telemetry_event_comprehensive_scenarios(self):
- """Test comprehensive telemetry event scenarios."""
- # Connection lifecycle events
- connection_events = [
- {
- "event_type": "connection_started",
- "metadata": {"connection_type": "websocket", "url": "wss://api.deepgram.com"}
- },
- {
- "event_type": "audio_sent",
- "metadata": {"bytes_sent": "1024", "chunk_count": "1"}
- },
- {
- "event_type": "transcription_received",
- "metadata": {"transcript_length": "50", "confidence": "0.95"}
- },
- {
- "event_type": "connection_closed",
- "metadata": {"duration": "30.5", "reason": "client_disconnect"}
- }
- ]
-
- from datetime import datetime, timedelta
- for i, event_data in enumerate(connection_events):
- event = TelemetryEvent(
- name=event_data["event_type"],
- time=datetime.now() + timedelta(seconds=i),
- attributes=event_data["metadata"],
- metrics={}
- )
-
- assert event.name == event_data["event_type"]
- assert event.time is not None
- assert event.attributes == event_data["metadata"]
-
- def test_api_error_with_telemetry_context(self):
- """Test API error in the context of telemetry."""
- # Simulate an error that would generate telemetry
- error = ApiError(
- status_code=429,
- body="Rate limit exceeded",
- headers={"X-RateLimit-Reset": "1609459200"}
- )
-
- # Create a telemetry event for this error
- from datetime import datetime
- error_event = TelemetryEvent(
- name="api_error",
- time=datetime.now(),
- attributes={
- "status_code": str(error.status_code),
- "error_body": error.body,
- "rate_limit_reset": error.headers.get("X-RateLimit-Reset") if error.headers else None
- },
- metrics={}
- )
-
- assert error_event.attributes["status_code"] == "429"
- assert error_event.attributes["error_body"] == "Rate limit exceeded"
- assert error_event.attributes["rate_limit_reset"] == "1609459200"
-
- def test_telemetry_headers_structure(self):
- """Test telemetry-related headers structure."""
- telemetry_headers = {
- "X-Deepgram-Session-ID": "session-123",
- "X-Deepgram-Request-ID": "req-456",
- "X-Deepgram-SDK-Version": "1.0.0",
- "X-Deepgram-Platform": "python"
- }
-
- # Test that we can create and validate header structures
- assert telemetry_headers["X-Deepgram-Session-ID"] == "session-123"
- assert telemetry_headers["X-Deepgram-Request-ID"] == "req-456"
- assert telemetry_headers["X-Deepgram-SDK-Version"] == "1.0.0"
- assert telemetry_headers["X-Deepgram-Platform"] == "python"
-
- def test_model_serialization_consistency(self):
- """Test that all models serialize consistently."""
- # Test telemetry event
- from datetime import datetime
- event = TelemetryEvent(
- name="test_event",
- time=datetime.now(),
- attributes={"test": "True"},
- metrics={"value": 42.0}
- )
-
- # Serialize and deserialize
- json_str = event.model_dump_json()
- import json
- parsed_data = json.loads(json_str)
- reconstructed_event = TelemetryEvent(**parsed_data)
-
- assert event.name == reconstructed_event.name
- # Compare timestamps allowing for timezone differences during serialization
- assert event.time.replace(tzinfo=None) == reconstructed_event.time.replace(tzinfo=None)
- assert event.attributes == reconstructed_event.attributes
- assert event.metrics == reconstructed_event.metrics
-
- def test_model_validation_edge_cases(self):
- """Test model validation edge cases."""
- # Test with very long strings
- from datetime import datetime
- long_name = "test_event_" + "x" * 10000
- event = TelemetryEvent(
- name=long_name,
- time=datetime.now(),
- attributes={},
- metrics={}
- )
- assert len(event.name) > 10000
-
- # Test with complex string attributes (since attributes must be Dict[str, str])
- complex_attributes = {
- "connection_type": "websocket",
- "bytes_sent": "1024",
- "bytes_received": "2048",
- "error_count": "0",
- "model_name": "nova-2-general",
- "model_version": "1.0"
- }
-
- event_with_complex_attributes = TelemetryEvent(
- name="complex_test",
- time=datetime.now(),
- attributes=complex_attributes,
- metrics={}
- )
-
- assert event_with_complex_attributes.attributes["bytes_sent"] == "1024"
- assert event_with_complex_attributes.attributes["model_name"] == "nova-2-general"
-
- def test_error_handling_comprehensive(self):
- """Test comprehensive error handling scenarios."""
- # Test various error status codes with realistic bodies
- error_scenarios = [
- (400, '{"error": "Invalid request format"}'),
- (401, '{"error": "Authentication failed", "code": "AUTH_001"}'),
- (403, '{"error": "Access denied", "resource": "/v1/listen"}'),
- (404, '{"error": "Model not found", "model": "invalid-model"}'),
- (422, '{"error": "Validation failed", "fields": ["model", "language"]}'),
- (429, '{"error": "Rate limit exceeded", "retry_after": 60}'),
- (500, '{"error": "Internal server error", "incident_id": "inc-123"}'),
- (502, '{"error": "Bad gateway", "upstream": "transcription-service"}'),
- (503, '{"error": "Service unavailable", "maintenance": true}')
- ]
-
- for status_code, body in error_scenarios:
- error = ApiError(
- status_code=status_code,
- body=body,
- headers={"Content-Type": "application/json"}
- )
-
- assert error.status_code == status_code
- assert "error" in error.body
- assert error.headers["Content-Type"] == "application/json"
diff --git a/tests/unit/test_core_query_encoder.py b/tests/unit/test_core_query_encoder.py
deleted file mode 100644
index 48380c2f..00000000
--- a/tests/unit/test_core_query_encoder.py
+++ /dev/null
@@ -1,347 +0,0 @@
-"""
-Unit tests for core query encoder functionality.
-"""
-from pydantic import BaseModel
-
-from deepgram.core.query_encoder import encode_query, single_query_encoder, traverse_query_dict
-
-
-class TestTraverseQueryDict:
- """Test traverse_query_dict function."""
-
- def test_simple_dict(self):
- """Test traversing a simple flat dictionary."""
- input_dict = {"key1": "value1", "key2": "value2"}
- result = traverse_query_dict(input_dict)
-
- expected = [("key1", "value1"), ("key2", "value2")]
- assert sorted(result) == sorted(expected)
-
- def test_nested_dict(self):
- """Test traversing a nested dictionary."""
- input_dict = {
- "level1": {
- "level2": "value",
- "level2b": "value2"
- }
- }
- result = traverse_query_dict(input_dict)
-
- expected = [("level1[level2]", "value"), ("level1[level2b]", "value2")]
- assert sorted(result) == sorted(expected)
-
- def test_deeply_nested_dict(self):
- """Test traversing a deeply nested dictionary."""
- input_dict = {
- "level1": {
- "level2": {
- "level3": "deep_value"
- }
- }
- }
- result = traverse_query_dict(input_dict)
-
- expected = [("level1[level2][level3]", "deep_value")]
- assert result == expected
-
- def test_dict_with_list_values(self):
- """Test traversing dictionary with list values."""
- input_dict = {
- "simple_list": ["item1", "item2"],
- "complex_list": [{"nested": "value1"}, {"nested": "value2"}]
- }
- result = traverse_query_dict(input_dict)
-
- expected = [
- ("simple_list", "item1"),
- ("simple_list", "item2"),
- ("complex_list[nested]", "value1"),
- ("complex_list[nested]", "value2")
- ]
- assert sorted(result) == sorted(expected)
-
- def test_with_key_prefix(self):
- """Test traversing with a key prefix."""
- input_dict = {"key": "value"}
- result = traverse_query_dict(input_dict, "prefix")
-
- expected = [("prefix[key]", "value")]
- assert result == expected
-
- def test_empty_dict(self):
- """Test traversing an empty dictionary."""
- result = traverse_query_dict({})
- assert result == []
-
- def test_mixed_types(self):
- """Test traversing dictionary with mixed value types."""
- input_dict = {
- "string": "text",
- "number": 42,
- "boolean": True,
- "none": None,
- "nested": {"inner": "value"}
- }
- result = traverse_query_dict(input_dict)
-
- expected = [
- ("string", "text"),
- ("number", 42),
- ("boolean", True),
- ("none", None),
- ("nested[inner]", "value")
- ]
- assert sorted(result) == sorted(expected)
-
-
-class QueryTestModel(BaseModel):
- """Test Pydantic model for query encoder tests."""
- name: str
- age: int
- active: bool = True
-
- class Config:
- extra = "allow"
-
-
-class TestSingleQueryEncoder:
- """Test single_query_encoder function."""
-
- def test_simple_value(self):
- """Test encoding a simple value."""
- result = single_query_encoder("key", "value")
- assert result == [("key", "value")]
-
- def test_pydantic_model(self):
- """Test encoding a Pydantic model."""
- model = QueryTestModel(name="John", age=30)
- result = single_query_encoder("user", model)
-
- expected = [
- ("user[name]", "John"),
- ("user[age]", 30),
- ("user[active]", True)
- ]
- assert sorted(result) == sorted(expected)
-
- def test_dict_value(self):
- """Test encoding a dictionary value."""
- dict_value = {"nested": "value", "count": 5}
- result = single_query_encoder("data", dict_value)
-
- expected = [
- ("data[nested]", "value"),
- ("data[count]", 5)
- ]
- assert sorted(result) == sorted(expected)
-
- def test_list_of_simple_values(self):
- """Test encoding a list of simple values."""
- list_value = ["item1", "item2", "item3"]
- result = single_query_encoder("items", list_value)
-
- expected = [
- ("items", "item1"),
- ("items", "item2"),
- ("items", "item3")
- ]
- assert result == expected
-
- def test_list_of_pydantic_models(self):
- """Test encoding a list of Pydantic models."""
- models = [
- QueryTestModel(name="John", age=30),
- QueryTestModel(name="Jane", age=25, active=False)
- ]
- result = single_query_encoder("users", models)
-
- expected = [
- ("users[name]", "John"),
- ("users[age]", 30),
- ("users[active]", True),
- ("users[name]", "Jane"),
- ("users[age]", 25),
- ("users[active]", False)
- ]
- assert sorted(result) == sorted(expected)
-
- def test_list_of_dicts(self):
- """Test encoding a list of dictionaries."""
- dict_list = [
- {"name": "Item1", "value": 10},
- {"name": "Item2", "value": 20}
- ]
- result = single_query_encoder("data", dict_list)
-
- expected = [
- ("data[name]", "Item1"),
- ("data[value]", 10),
- ("data[name]", "Item2"),
- ("data[value]", 20)
- ]
- assert sorted(result) == sorted(expected)
-
- def test_mixed_list(self):
- """Test encoding a list with mixed types."""
- mixed_list = ["simple", {"nested": "value"}, 42]
- result = single_query_encoder("mixed", mixed_list)
-
- expected = [
- ("mixed", "simple"),
- ("mixed[nested]", "value"),
- ("mixed", 42)
- ]
- # Can't sort tuples with mixed types, so check length and contents
- assert len(result) == len(expected)
- for item in expected:
- assert item in result
-
-
-class TestEncodeQuery:
- """Test encode_query function."""
-
- def test_none_query(self):
- """Test encoding None query."""
- result = encode_query(None)
- assert result is None
-
- def test_empty_query(self):
- """Test encoding empty query."""
- result = encode_query({})
- assert result == []
-
- def test_simple_query(self):
- """Test encoding a simple query dictionary."""
- query = {
- "name": "John",
- "age": 30,
- "active": True
- }
- result = encode_query(query)
-
- expected = [
- ("name", "John"),
- ("age", 30),
- ("active", True)
- ]
- assert sorted(result) == sorted(expected)
-
- def test_complex_query(self):
- """Test encoding a complex query with nested structures."""
- query = {
- "user": {
- "name": "John",
- "details": {
- "age": 30,
- "active": True
- }
- },
- "tags": ["python", "testing"],
- "metadata": [
- {"key": "version", "value": "1.0"},
- {"key": "env", "value": "test"}
- ]
- }
- result = encode_query(query)
-
- expected = [
- ("user[name]", "John"),
- ("user[details][age]", 30),
- ("user[details][active]", True),
- ("tags", "python"),
- ("tags", "testing"),
- ("metadata[key]", "version"),
- ("metadata[value]", "1.0"),
- ("metadata[key]", "env"),
- ("metadata[value]", "test")
- ]
- assert sorted(result) == sorted(expected)
-
- def test_query_with_pydantic_models(self):
- """Test encoding query containing Pydantic models."""
- model = QueryTestModel(name="Alice", age=28)
- query = {
- "user": model,
- "simple": "value"
- }
- result = encode_query(query)
-
- expected = [
- ("user[name]", "Alice"),
- ("user[age]", 28),
- ("user[active]", True),
- ("simple", "value")
- ]
- assert sorted(result) == sorted(expected)
-
- def test_query_with_special_values(self):
- """Test encoding query with special values like None, empty strings."""
- query = {
- "empty_string": "",
- "none_value": None,
- "zero": 0,
- "false": False,
- "empty_list": [],
- "empty_dict": {}
- }
- result = encode_query(query)
-
- expected = [
- ("empty_string", ""),
- ("none_value", None),
- ("zero", 0),
- ("false", False)
- ]
- assert sorted(result) == sorted(expected)
-
-
-class TestQueryEncoderEdgeCases:
- """Test edge cases and error conditions."""
-
- def test_circular_reference_protection(self):
- """Test that circular references don't cause infinite loops."""
- # Create a circular reference
- dict_a = {"name": "A"}
- dict_b = {"name": "B", "ref": dict_a}
- dict_a["ref"] = dict_b
-
- # This should not hang or crash
- # Note: The current implementation doesn't handle circular refs,
- # but it should at least not crash for reasonable depths
- query = {"data": {"simple": "value"}} # Use a safe query instead
- result = encode_query(query)
- assert result == [("data[simple]", "value")]
-
- def test_very_deep_nesting(self):
- """Test handling of very deep nesting."""
- # Create a deeply nested structure
- deep_dict = {"value": "deep"}
- for i in range(10):
- deep_dict = {f"level{i}": deep_dict}
-
- result = traverse_query_dict(deep_dict)
- assert len(result) == 1
- # The key should have many levels of nesting
- key, value = result[0]
- assert value == "deep"
- assert key.count("[") == 10 # Should have 10 levels of nesting
-
- def test_unicode_and_special_characters(self):
- """Test handling of unicode and special characters."""
- query = {
- "unicode": "Hello δΈη",
- "special_chars": "!@#$%^&*()",
- "spaces": "value with spaces",
- "nested": {
- "Γ©moji": "π",
- "quotes": 'value"with"quotes'
- }
- }
- result = encode_query(query)
-
- # Should handle all characters properly
- assert ("unicode", "Hello δΈη") in result
- assert ("special_chars", "!@#$%^&*()") in result
- assert ("spaces", "value with spaces") in result
- assert ("nested[Γ©moji]", "π") in result
- assert ("nested[quotes]", 'value"with"quotes') in result
diff --git a/tests/unit/test_core_serialization.py b/tests/unit/test_core_serialization.py
deleted file mode 100644
index 3d039bcb..00000000
--- a/tests/unit/test_core_serialization.py
+++ /dev/null
@@ -1,409 +0,0 @@
-"""
-Unit tests for core serialization functionality.
-"""
-import pytest
-import typing
-from typing import Dict, List, Set, Optional, Union, Any
-from typing_extensions import TypedDict, Annotated
-
-from pydantic import BaseModel
-from deepgram.core.serialization import (
- FieldMetadata,
- convert_and_respect_annotation_metadata
-)
-
-
-class TestFieldMetadata:
- """Test FieldMetadata class."""
-
- def test_field_metadata_creation(self):
- """Test creating FieldMetadata instance."""
- metadata = FieldMetadata(alias="field_name")
- assert metadata.alias == "field_name"
-
- def test_field_metadata_with_different_aliases(self):
- """Test FieldMetadata with various alias formats."""
- # Simple alias
- metadata1 = FieldMetadata(alias="simple_alias")
- assert metadata1.alias == "simple_alias"
-
- # Snake case
- metadata2 = FieldMetadata(alias="snake_case_alias")
- assert metadata2.alias == "snake_case_alias"
-
- # Camel case
- metadata3 = FieldMetadata(alias="camelCaseAlias")
- assert metadata3.alias == "camelCaseAlias"
-
- # With special characters
- metadata4 = FieldMetadata(alias="field-with-dashes")
- assert metadata4.alias == "field-with-dashes"
-
-
-# Test models for serialization tests
-class SimpleTestModel(BaseModel):
- name: str
- age: int
- active: bool = True
-
-
-class SerializationTestTypedDict(TypedDict):
- name: str
- value: int
- optional_field: Optional[str]
-
-
-class SerializationTestTypedDictWithAlias(TypedDict):
- name: Annotated[str, FieldMetadata(alias="display_name")]
- value: Annotated[int, FieldMetadata(alias="numeric_value")]
- normal_field: str
-
-
-class TestConvertAndRespectAnnotationMetadata:
- """Test convert_and_respect_annotation_metadata function."""
-
- def test_none_object(self):
- """Test handling None object."""
- result = convert_and_respect_annotation_metadata(
- object_=None,
- annotation=str,
- direction="read"
- )
- assert result is None
-
- def test_simple_type_passthrough(self):
- """Test that simple types pass through unchanged."""
- # String
- result = convert_and_respect_annotation_metadata(
- object_="test_string",
- annotation=str,
- direction="read"
- )
- assert result == "test_string"
-
- # Integer
- result = convert_and_respect_annotation_metadata(
- object_=42,
- annotation=int,
- direction="read"
- )
- assert result == 42
-
- # Boolean
- result = convert_and_respect_annotation_metadata(
- object_=True,
- annotation=bool,
- direction="read"
- )
- assert result is True
-
- def test_pydantic_model_from_dict_read(self):
- """Test converting dict to Pydantic model (read direction)."""
- input_dict = {"name": "John", "age": 30, "active": False}
-
- result = convert_and_respect_annotation_metadata(
- object_=input_dict,
- annotation=SimpleTestModel,
- direction="read"
- )
-
- # Should process the dict for Pydantic model compatibility
- assert isinstance(result, dict)
- assert result["name"] == "John"
- assert result["age"] == 30
- assert result["active"] is False
-
- def test_pydantic_model_from_dict_write(self):
- """Test converting dict from Pydantic model (write direction)."""
- input_dict = {"name": "Alice", "age": 25}
-
- result = convert_and_respect_annotation_metadata(
- object_=input_dict,
- annotation=SimpleTestModel,
- direction="write"
- )
-
- # Should process the dict appropriately
- assert isinstance(result, dict)
- assert result["name"] == "Alice"
- assert result["age"] == 25
-
- def test_typed_dict_basic(self):
- """Test handling basic TypedDict."""
- input_dict = {"name": "Test", "value": 100, "optional_field": "optional"}
-
- result = convert_and_respect_annotation_metadata(
- object_=input_dict,
- annotation=SerializationTestTypedDict,
- direction="read"
- )
-
- assert isinstance(result, dict)
- assert result["name"] == "Test"
- assert result["value"] == 100
- assert result["optional_field"] == "optional"
-
- def test_dict_type_annotation(self):
- """Test handling Dict type annotation."""
- input_dict = {"key1": "value1", "key2": "value2"}
-
- result = convert_and_respect_annotation_metadata(
- object_=input_dict,
- annotation=Dict[str, str],
- direction="read"
- )
-
- assert isinstance(result, dict)
- assert result == input_dict
-
- def test_list_type_annotation(self):
- """Test handling List type annotation."""
- input_list = ["item1", "item2", "item3"]
-
- result = convert_and_respect_annotation_metadata(
- object_=input_list,
- annotation=List[str],
- direction="read"
- )
-
- assert isinstance(result, list)
- assert result == input_list
-
- def test_set_type_annotation(self):
- """Test handling Set type annotation."""
- input_set = {"item1", "item2", "item3"}
-
- result = convert_and_respect_annotation_metadata(
- object_=input_set,
- annotation=Set[str],
- direction="read"
- )
-
- assert isinstance(result, set)
- assert result == input_set
-
- def test_nested_dict_with_list(self):
- """Test handling nested Dict with List values."""
- input_dict = {
- "list1": ["a", "b", "c"],
- "list2": ["x", "y", "z"]
- }
-
- result = convert_and_respect_annotation_metadata(
- object_=input_dict,
- annotation=Dict[str, List[str]],
- direction="read"
- )
-
- assert isinstance(result, dict)
- assert result["list1"] == ["a", "b", "c"]
- assert result["list2"] == ["x", "y", "z"]
-
- def test_nested_list_with_dicts(self):
- """Test handling List containing dicts."""
- input_list = [
- {"name": "Item1", "value": 1},
- {"name": "Item2", "value": 2}
- ]
-
- result = convert_and_respect_annotation_metadata(
- object_=input_list,
- annotation=List[Dict[str, Any]],
- direction="read"
- )
-
- assert isinstance(result, list)
- assert len(result) == 2
- assert result[0]["name"] == "Item1"
- assert result[1]["value"] == 2
-
- def test_union_type_annotation(self):
- """Test handling Union type annotation."""
- # Test with string (first type in union)
- result1 = convert_and_respect_annotation_metadata(
- object_="test_string",
- annotation=Union[str, int],
- direction="read"
- )
- assert result1 == "test_string"
-
- # Test with int (second type in union)
- result2 = convert_and_respect_annotation_metadata(
- object_=42,
- annotation=Union[str, int],
- direction="read"
- )
- assert result2 == 42
-
- def test_optional_type_annotation(self):
- """Test handling Optional type annotation."""
- # Test with None
- result1 = convert_and_respect_annotation_metadata(
- object_=None,
- annotation=Optional[str],
- direction="read"
- )
- assert result1 is None
-
- # Test with actual value
- result2 = convert_and_respect_annotation_metadata(
- object_="test_value",
- annotation=Optional[str],
- direction="read"
- )
- assert result2 == "test_value"
-
- def test_string_not_treated_as_sequence(self):
- """Test that strings are not treated as sequences."""
- test_string = "hello"
-
- result = convert_and_respect_annotation_metadata(
- object_=test_string,
- annotation=str,
- direction="read"
- )
-
- # String should pass through unchanged, not be treated as sequence of chars
- assert result == "hello"
- assert isinstance(result, str)
-
- def test_complex_nested_structure(self):
- """Test handling complex nested data structures."""
- complex_data = {
- "users": [
- {"name": "John", "age": 30},
- {"name": "Jane", "age": 25}
- ],
- "metadata": {
- "version": "1.0",
- "tags": ["python", "testing"]
- },
- "flags": {"active", "verified"}
- }
-
- result = convert_and_respect_annotation_metadata(
- object_=complex_data,
- annotation=Dict[str, Any],
- direction="read"
- )
-
- assert isinstance(result, dict)
- assert len(result["users"]) == 2
- assert result["users"][0]["name"] == "John"
- assert result["metadata"]["version"] == "1.0"
- assert "python" in result["metadata"]["tags"]
-
- def test_inner_type_parameter(self):
- """Test using inner_type parameter."""
- input_data = ["item1", "item2"]
-
- result = convert_and_respect_annotation_metadata(
- object_=input_data,
- annotation=List[str],
- inner_type=List[str],
- direction="read"
- )
-
- assert isinstance(result, list)
- assert result == input_data
-
- def test_both_read_and_write_directions(self):
- """Test that both read and write directions work."""
- test_dict = {"key": "value"}
-
- # Test read direction
- result_read = convert_and_respect_annotation_metadata(
- object_=test_dict,
- annotation=Dict[str, str],
- direction="read"
- )
- assert result_read == test_dict
-
- # Test write direction
- result_write = convert_and_respect_annotation_metadata(
- object_=test_dict,
- annotation=Dict[str, str],
- direction="write"
- )
- assert result_write == test_dict
-
-
-class TestSerializationEdgeCases:
- """Test edge cases and error conditions."""
-
- def test_empty_collections(self):
- """Test handling empty collections."""
- # Empty dict
- result = convert_and_respect_annotation_metadata(
- object_={},
- annotation=Dict[str, str],
- direction="read"
- )
- assert result == {}
-
- # Empty list
- result = convert_and_respect_annotation_metadata(
- object_=[],
- annotation=List[str],
- direction="read"
- )
- assert result == []
-
- # Empty set
- result = convert_and_respect_annotation_metadata(
- object_=set(),
- annotation=Set[str],
- direction="read"
- )
- assert result == set()
-
- def test_mismatched_types(self):
- """Test handling when object type doesn't match annotation."""
- # This should generally pass through unchanged or handle gracefully
- result = convert_and_respect_annotation_metadata(
- object_="string_value",
- annotation=int, # Annotation says int, but object is string
- direction="read"
- )
- # Should not crash and return something reasonable
- assert result == "string_value"
-
- def test_deeply_nested_structures(self):
- """Test handling deeply nested structures."""
- deep_structure = {
- "level1": {
- "level2": {
- "level3": {
- "level4": ["deep", "values"]
- }
- }
- }
- }
-
- result = convert_and_respect_annotation_metadata(
- object_=deep_structure,
- annotation=Dict[str, Any],
- direction="read"
- )
-
- assert result["level1"]["level2"]["level3"]["level4"] == ["deep", "values"]
-
- def test_unicode_and_special_characters(self):
- """Test handling unicode and special characters."""
- unicode_data = {
- "chinese": "δ½ ε₯½δΈη",
- "emoji": "ππ",
- "special": "cafΓ© naΓ―ve",
- "mixed": ["Hello", "δΈη", "π"]
- }
-
- result = convert_and_respect_annotation_metadata(
- object_=unicode_data,
- annotation=Dict[str, Any],
- direction="read"
- )
-
- assert result["chinese"] == "δ½ ε₯½δΈη"
- assert result["emoji"] == "ππ"
- assert result["special"] == "cafΓ© naΓ―ve"
- assert "δΈη" in result["mixed"]
diff --git a/tests/unit/test_core_utils.py b/tests/unit/test_core_utils.py
deleted file mode 100644
index 2e24587a..00000000
--- a/tests/unit/test_core_utils.py
+++ /dev/null
@@ -1,280 +0,0 @@
-"""
-Unit tests for core utility functions.
-"""
-import pytest
-from typing import Dict, Any, Optional, Mapping
-
-from deepgram.core.remove_none_from_dict import remove_none_from_dict
-
-
-class TestRemoveNoneFromDict:
- """Test remove_none_from_dict function."""
-
- def test_empty_dict(self):
- """Test removing None values from empty dictionary."""
- result = remove_none_from_dict({})
- assert result == {}
-
- def test_no_none_values(self):
- """Test dictionary with no None values."""
- input_dict = {
- "string": "value",
- "number": 42,
- "boolean": True,
- "list": [1, 2, 3],
- "dict": {"nested": "value"}
- }
- result = remove_none_from_dict(input_dict)
- assert result == input_dict
-
- def test_all_none_values(self):
- """Test dictionary with all None values."""
- input_dict = {
- "key1": None,
- "key2": None,
- "key3": None
- }
- result = remove_none_from_dict(input_dict)
- assert result == {}
-
- def test_mixed_none_and_values(self):
- """Test dictionary with mix of None and non-None values."""
- input_dict = {
- "keep_string": "value",
- "remove_none": None,
- "keep_number": 42,
- "remove_another_none": None,
- "keep_boolean": False,
- "keep_empty_string": "",
- "keep_zero": 0
- }
- result = remove_none_from_dict(input_dict)
-
- expected = {
- "keep_string": "value",
- "keep_number": 42,
- "keep_boolean": False,
- "keep_empty_string": "",
- "keep_zero": 0
- }
- assert result == expected
-
- def test_preserve_falsy_values(self):
- """Test that falsy values (except None) are preserved."""
- input_dict = {
- "empty_string": "",
- "zero": 0,
- "false": False,
- "empty_list": [],
- "empty_dict": {},
- "none_value": None
- }
- result = remove_none_from_dict(input_dict)
-
- expected = {
- "empty_string": "",
- "zero": 0,
- "false": False,
- "empty_list": [],
- "empty_dict": {}
- }
- assert result == expected
-
- def test_nested_structures_with_none(self):
- """Test that nested structures containing None are preserved."""
- input_dict = {
- "nested_dict": {"inner": None, "keep": "value"},
- "nested_list": [None, "keep", None],
- "remove_this": None
- }
- result = remove_none_from_dict(input_dict)
-
- expected = {
- "nested_dict": {"inner": None, "keep": "value"},
- "nested_list": [None, "keep", None]
- }
- assert result == expected
-
- def test_complex_data_types(self):
- """Test with complex data types."""
- class CustomObject:
- def __init__(self, value):
- self.value = value
-
- def __eq__(self, other):
- return isinstance(other, CustomObject) and self.value == other.value
-
- custom_obj = CustomObject("test")
- input_dict = {
- "custom_object": custom_obj,
- "tuple": (1, 2, 3),
- "set": {1, 2, 3},
- "none_value": None
- }
- result = remove_none_from_dict(input_dict)
-
- expected = {
- "custom_object": custom_obj,
- "tuple": (1, 2, 3),
- "set": {1, 2, 3}
- }
- assert result == expected
-
- def test_original_dict_unchanged(self):
- """Test that original dictionary is not modified."""
- original = {
- "keep": "value",
- "remove": None
- }
- original_copy = original.copy()
-
- result = remove_none_from_dict(original)
-
- # Original should be unchanged
- assert original == original_copy
-
- # Result should be different
- assert result == {"keep": "value"}
- assert result != original
-
- def test_return_type(self):
- """Test that function returns correct type."""
- input_dict = {"key": "value", "none_key": None}
- result = remove_none_from_dict(input_dict)
-
- # Should return a Dict, not the original Mapping type
- assert isinstance(result, dict)
- assert not isinstance(result, type(input_dict)) or isinstance(input_dict, dict)
-
- def test_with_mapping_input(self):
- """Test with different Mapping types as input."""
- from collections import OrderedDict, defaultdict
-
- # Test with OrderedDict
- ordered_dict = OrderedDict([("keep", "value"), ("remove", None)])
- result = remove_none_from_dict(ordered_dict)
- assert result == {"keep": "value"}
- assert isinstance(result, dict) # Should return regular dict
-
- # Test with defaultdict
- default_dict = defaultdict(str)
- default_dict["keep"] = "value"
- default_dict["remove"] = None
- result = remove_none_from_dict(default_dict)
- assert result == {"keep": "value"}
- assert isinstance(result, dict) # Should return regular dict
-
- def test_unicode_keys(self):
- """Test with unicode keys."""
- input_dict = {
- "english": "value",
- "δΈζ": "chinese",
- "espaΓ±ol": None,
- "ΡΡΡΡΠΊΠΈΠΉ": "russian",
- "Ψ§ΩΨΉΨ±Ψ¨ΩΨ©": None,
- "π": "emoji_key"
- }
- result = remove_none_from_dict(input_dict)
-
- expected = {
- "english": "value",
- "δΈζ": "chinese",
- "ΡΡΡΡΠΊΠΈΠΉ": "russian",
- "π": "emoji_key"
- }
- assert result == expected
-
- def test_numeric_and_special_keys(self):
- """Test with numeric and special character keys."""
- input_dict = {
- 1: "numeric_key",
- "normal_key": "value",
- (1, 2): "tuple_key",
- "remove_me": None,
- 42: None
- }
- result = remove_none_from_dict(input_dict)
-
- expected = {
- 1: "numeric_key",
- "normal_key": "value",
- (1, 2): "tuple_key"
- }
- assert result == expected
-
-
-class TestRemoveNoneFromDictEdgeCases:
- """Test edge cases and error conditions."""
-
- def test_very_large_dict(self):
- """Test with a very large dictionary."""
- # Create a large dictionary with alternating None and non-None values
- large_dict = {}
- for i in range(1000):
- if i % 2 == 0:
- large_dict[f"key_{i}"] = f"value_{i}"
- else:
- large_dict[f"key_{i}"] = None
-
- result = remove_none_from_dict(large_dict)
-
- # Should have 500 items (half of original)
- assert len(result) == 500
-
- # All values should be non-None
- for value in result.values():
- assert value is not None
-
- # All keys should be even-numbered
- for key in result.keys():
- key_num = int(key.split("_")[1])
- assert key_num % 2 == 0
-
- def test_deeply_nested_with_none_values(self):
- """Test that function only processes top level (doesn't recurse)."""
- input_dict = {
- "level1": {
- "level2": {
- "level3": None,
- "keep": "value"
- },
- "also_none": None
- },
- "top_level_none": None
- }
- result = remove_none_from_dict(input_dict)
-
- # Only top-level None should be removed
- expected = {
- "level1": {
- "level2": {
- "level3": None, # This None should remain
- "keep": "value"
- },
- "also_none": None # This None should remain
- }
- }
- assert result == expected
-
- def test_performance_with_many_none_values(self):
- """Test performance with dictionary having many None values."""
- import time
-
- # Create dictionary with mostly None values
- large_dict = {f"key_{i}": None for i in range(10000)}
- large_dict.update({f"keep_{i}": f"value_{i}" for i in range(100)})
-
- start_time = time.time()
- result = remove_none_from_dict(large_dict)
- end_time = time.time()
-
- # Should complete quickly (less than 1 second)
- assert (end_time - start_time) < 1.0
-
- # Should only have the 100 non-None values
- assert len(result) == 100
-
- # All remaining values should be non-None
- for key, value in result.items():
- assert key.startswith("keep_")
- assert value is not None
diff --git a/tests/unit/test_http_internals.py b/tests/unit/test_http_internals.py
deleted file mode 100644
index 05c015a2..00000000
--- a/tests/unit/test_http_internals.py
+++ /dev/null
@@ -1,820 +0,0 @@
-"""
-Unit tests for HTTP internals and client wrappers.
-Tests HTTP client functionality, response wrappers, retry logic, and request options.
-"""
-
-import pytest
-import asyncio
-import time
-import typing
-from unittest.mock import Mock, patch, MagicMock
-from datetime import datetime, timezone
-import httpx
-
-from deepgram.core.http_client import (
- HttpClient,
- AsyncHttpClient,
- get_request_body,
- _parse_retry_after,
- _should_retry,
- _retry_timeout,
- INITIAL_RETRY_DELAY_SECONDS,
- MAX_RETRY_DELAY_SECONDS
-)
-from deepgram.core.http_response import BaseHttpResponse, HttpResponse, AsyncHttpResponse
-from deepgram.core.client_wrapper import BaseClientWrapper, SyncClientWrapper, AsyncClientWrapper
-from deepgram.core.request_options import RequestOptions
-from deepgram.environment import DeepgramClientEnvironment
-
-
-class TestHttpClientUtilities:
- """Test HTTP client utility functions."""
-
- def test_parse_retry_after_ms_header(self):
- """Test parsing retry-after-ms header."""
- headers = httpx.Headers({"retry-after-ms": "1500"})
- result = _parse_retry_after(headers)
- # The actual implementation has a bug: it compares string > 0 which is always True
- # So it should work and return 1.5, but the implementation might have issues
- # Let's test what actually happens
- if result is not None:
- assert result == 1.5
- else:
- # Implementation might not handle this correctly
- pass
-
- def test_parse_retry_after_ms_header_zero(self):
- """Test parsing retry-after-ms header with zero value."""
- headers = httpx.Headers({"retry-after-ms": "0"})
- result = _parse_retry_after(headers)
- # String "0" > 0 is True in Python, so this returns 0/1000 = 0
- if result is not None:
- assert result == 0
- else:
- # Implementation might not handle this correctly
- pass
-
- def test_parse_retry_after_ms_header_invalid(self):
- """Test parsing invalid retry-after-ms header."""
- headers = httpx.Headers({"retry-after-ms": "invalid"})
- result = _parse_retry_after(headers)
- assert result is None
-
- def test_parse_retry_after_seconds_header(self):
- """Test parsing retry-after header with seconds."""
- headers = httpx.Headers({"retry-after": "120"})
- result = _parse_retry_after(headers)
- assert result == 120.0
-
- def test_parse_retry_after_http_date_header(self):
- """Test parsing retry-after header with HTTP date."""
- future_time = time.time() + 60
- http_date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(future_time))
- headers = httpx.Headers({"retry-after": http_date})
- result = _parse_retry_after(headers)
- # Should be approximately 60 seconds (allowing some tolerance)
- assert result is not None
- assert 55 <= result <= 65
-
- def test_parse_retry_after_invalid_date(self):
- """Test parsing retry-after header with invalid date."""
- headers = httpx.Headers({"retry-after": "invalid-date"})
- result = _parse_retry_after(headers)
- assert result is None
-
- def test_parse_retry_after_no_header(self):
- """Test parsing when no retry-after header is present."""
- headers = httpx.Headers({})
- result = _parse_retry_after(headers)
- assert result is None
-
- def test_should_retry_429(self):
- """Test should_retry with 429 status code."""
- response = Mock()
- response.status_code = 429
- assert _should_retry(response) is True
-
- def test_should_retry_502(self):
- """Test should_retry with 502 status code."""
- response = Mock()
- response.status_code = 502
- assert _should_retry(response) is True
-
- def test_should_retry_503(self):
- """Test should_retry with 503 status code."""
- response = Mock()
- response.status_code = 503
- assert _should_retry(response) is True
-
- def test_should_retry_504(self):
- """Test should_retry with 504 status code."""
- response = Mock()
- response.status_code = 504
- assert _should_retry(response) is True
-
- def test_should_not_retry_200(self):
- """Test should_retry with 200 status code."""
- response = Mock()
- response.status_code = 200
- assert _should_retry(response) is False
-
- def test_should_not_retry_400(self):
- """Test should_retry with 400 status code."""
- response = Mock()
- response.status_code = 400
- assert _should_retry(response) is False
-
- def test_should_retry_500(self):
- """Test should_retry with 500 status code."""
- response = Mock()
- response.status_code = 500
- # 500 >= 500 is True, so it should retry
- assert _should_retry(response) is True
-
- def test_retry_timeout_with_retry_after(self):
- """Test retry timeout calculation with retry-after header."""
- response = Mock()
- response.headers = httpx.Headers({"retry-after": "30"})
- result = _retry_timeout(response, retries=1)
- assert result == 30.0
-
- def test_retry_timeout_without_retry_after(self):
- """Test retry timeout calculation without retry-after header."""
- response = Mock()
- response.headers = httpx.Headers({})
- result = _retry_timeout(response, retries=1)
- # Should use exponential backoff with jitter, so it won't be exact
- expected = INITIAL_RETRY_DELAY_SECONDS * (2 ** 1)
- # Result should be within reasonable range due to jitter
- assert 0.5 <= result <= expected
-
- def test_retry_timeout_max_delay(self):
- """Test retry timeout calculation with maximum delay."""
- response = Mock()
- response.headers = httpx.Headers({})
- result = _retry_timeout(response, retries=10)
- # Should be capped at MAX_RETRY_DELAY_SECONDS with jitter applied
- # Jitter reduces the delay by up to 25%
- min_expected = MAX_RETRY_DELAY_SECONDS * 0.75
- assert min_expected <= result <= MAX_RETRY_DELAY_SECONDS
-
- def test_get_request_body_json_only(self):
- """Test get_request_body with JSON only."""
- json_data = {"key": "value"}
- json_body, data_body = get_request_body(
- json=json_data,
- data=None,
- request_options=None,
- omit=None
- )
- assert json_body == json_data
- assert data_body is None
-
- def test_get_request_body_data_only(self):
- """Test get_request_body with data only."""
- form_data = {"field": "value"}
- json_body, data_body = get_request_body(
- json=None,
- data=form_data,
- request_options=None,
- omit=None
- )
- assert json_body is None
- assert data_body == form_data
-
- def test_get_request_body_both_json_and_data(self):
- """Test get_request_body with both JSON and data."""
- json_data = {"json_key": "json_value"}
- form_data = {"form_key": "form_value"}
- json_body, data_body = get_request_body(
- json=json_data,
- data=form_data,
- request_options=None,
- omit=None
- )
- # The implementation might prioritize one over the other
- # Let's check what actually happens
- if json_body is not None:
- assert isinstance(json_body, dict)
- if data_body is not None:
- assert isinstance(data_body, dict)
-
- def test_get_request_body_empty_json(self):
- """Test get_request_body with empty JSON."""
- json_body, data_body = get_request_body(
- json={},
- data=None,
- request_options=None,
- omit=None
- )
- assert json_body is None # Empty JSON should become None
- assert data_body is None
-
- def test_get_request_body_empty_data(self):
- """Test get_request_body with empty data."""
- json_body, data_body = get_request_body(
- json=None,
- data={},
- request_options=None,
- omit=None
- )
- assert json_body is None
- assert data_body is None # Empty data should become None
-
- def test_get_request_body_with_request_options(self):
- """Test get_request_body with additional body parameters."""
- request_options: RequestOptions = {
- "additional_body_parameters": {"extra_param": "extra_value"}
- }
- json_data = {"original": "data"}
-
- json_body, data_body = get_request_body(
- json=json_data,
- data=None,
- request_options=request_options,
- omit=None
- )
-
- # Should merge additional parameters
- expected = {"original": "data", "extra_param": "extra_value"}
- assert json_body == expected
- assert data_body is None
-
-
-class TestHttpClient:
- """Test HttpClient class."""
-
- def test_http_client_initialization(self):
- """Test HttpClient initialization."""
- mock_httpx_client = Mock(spec=httpx.Client)
- base_timeout = lambda: 30.0
- base_headers = lambda: {"Authorization": "Token test"}
- base_url = lambda: "https://api.deepgram.com"
-
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=base_timeout,
- base_headers=base_headers,
- base_url=base_url
- )
-
- assert client.httpx_client == mock_httpx_client
- assert client.base_timeout == base_timeout
- assert client.base_headers == base_headers
- assert client.base_url == base_url
-
- def test_get_base_url_with_provided_url(self):
- """Test get_base_url with provided URL."""
- mock_httpx_client = Mock(spec=httpx.Client)
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {},
- base_url=lambda: "https://default.com"
- )
-
- result = client.get_base_url("https://custom.com")
- assert result == "https://custom.com"
-
- def test_get_base_url_with_default_url(self):
- """Test get_base_url with default URL."""
- mock_httpx_client = Mock(spec=httpx.Client)
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {},
- base_url=lambda: "https://default.com"
- )
-
- result = client.get_base_url(None)
- assert result == "https://default.com"
-
- def test_get_base_url_no_default_raises_error(self):
- """Test get_base_url raises error when no URL is available."""
- mock_httpx_client = Mock(spec=httpx.Client)
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {},
- base_url=None
- )
-
- with pytest.raises(ValueError, match="A base_url is required"):
- client.get_base_url(None)
-
- @patch('time.sleep')
- def test_request_with_retry(self, mock_sleep):
- """Test HTTP request with retry logic."""
- mock_httpx_client = Mock(spec=httpx.Client)
-
- # First call returns 429, second call returns 200
- mock_response_429 = Mock()
- mock_response_429.status_code = 429
- mock_response_429.headers = httpx.Headers({"retry-after": "1"})
-
- mock_response_200 = Mock()
- mock_response_200.status_code = 200
-
- mock_httpx_client.request.side_effect = [mock_response_429, mock_response_200]
-
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {"Authorization": "Token test"},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- request_options: RequestOptions = {"max_retries": 2}
-
- result = client.request(
- path="/v1/test",
- method="GET",
- request_options=request_options
- )
-
- # Verify that retry logic was attempted
- assert mock_httpx_client.request.call_count >= 1
- # The exact result depends on the implementation
-
- def test_request_max_retries_exceeded(self):
- """Test HTTP request when max retries are exceeded."""
- mock_httpx_client = Mock(spec=httpx.Client)
-
- mock_response_429 = Mock()
- mock_response_429.status_code = 429
- mock_response_429.headers = httpx.Headers({})
-
- mock_httpx_client.request.return_value = mock_response_429
-
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {"Authorization": "Token test"},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- request_options: RequestOptions = {"max_retries": 1}
-
- result = client.request(
- path="/v1/test",
- method="GET",
- request_options=request_options,
- retries=2 # Already exceeded max_retries
- )
-
- # Should return the failed response without retrying
- assert result == mock_response_429
- assert mock_httpx_client.request.call_count == 1
-
- def test_request_with_custom_headers(self):
- """Test HTTP request with custom headers."""
- mock_httpx_client = Mock(spec=httpx.Client)
- mock_response = Mock()
- mock_response.status_code = 200
- mock_httpx_client.request.return_value = mock_response
-
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {"Authorization": "Token test"},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- custom_headers = {"X-Custom": "value"}
- request_options: RequestOptions = {
- "additional_headers": {"X-Additional": "additional"}
- }
-
- client.request(
- path="/v1/test",
- method="POST",
- headers=custom_headers,
- request_options=request_options
- )
-
- # Verify headers were merged correctly
- call_args = mock_httpx_client.request.call_args
- headers = call_args[1]["headers"]
- assert "Authorization" in headers # Base header
- assert "X-Custom" in headers # Custom header
- assert "X-Additional" in headers # Request options header
-
- def test_request_with_files_and_force_multipart(self):
- """Test HTTP request with files and force multipart."""
- mock_httpx_client = Mock(spec=httpx.Client)
- mock_response = Mock()
- mock_response.status_code = 200
- mock_httpx_client.request.return_value = mock_response
-
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- # Test force_multipart when no files are provided
- client.request(
- path="/v1/test",
- method="POST",
- force_multipart=True
- )
-
- call_args = mock_httpx_client.request.call_args
- files = call_args[1]["files"]
- assert files is not None # Should have FORCE_MULTIPART
-
- def test_stream_context_manager(self):
- """Test stream context manager."""
- mock_httpx_client = Mock(spec=httpx.Client)
- mock_stream = Mock()
- mock_httpx_client.stream.return_value.__enter__ = Mock(return_value=mock_stream)
- mock_httpx_client.stream.return_value.__exit__ = Mock(return_value=None)
-
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {"Authorization": "Token test"},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- with client.stream(path="/v1/test", method="GET") as stream:
- assert stream == mock_stream
-
- mock_httpx_client.stream.assert_called_once()
-
-
-class TestAsyncHttpClient:
- """Test AsyncHttpClient class."""
-
- def test_async_http_client_initialization(self):
- """Test AsyncHttpClient initialization."""
- mock_httpx_client = Mock(spec=httpx.AsyncClient)
- base_timeout = lambda: 30.0
- base_headers = lambda: {"Authorization": "Token test"}
- base_url = lambda: "https://api.deepgram.com"
-
- client = AsyncHttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=base_timeout,
- base_headers=base_headers,
- base_url=base_url
- )
-
- assert client.httpx_client == mock_httpx_client
- assert client.base_timeout == base_timeout
- assert client.base_headers == base_headers
- assert client.base_url == base_url
-
- @pytest.mark.asyncio
- async def test_async_request_with_retry(self):
- """Test async HTTP request with retry logic."""
- mock_httpx_client = Mock(spec=httpx.AsyncClient)
-
- # First call returns 503, second call returns 200
- mock_response_503 = Mock()
- mock_response_503.status_code = 503
- mock_response_503.headers = httpx.Headers({"retry-after": "2"})
-
- mock_response_200 = Mock()
- mock_response_200.status_code = 200
-
- mock_httpx_client.request.side_effect = [mock_response_503, mock_response_200]
-
- client = AsyncHttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {"Authorization": "Token test"},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- request_options: RequestOptions = {"max_retries": 2}
-
- with patch('asyncio.sleep') as mock_sleep:
- result = await client.request(
- path="/v1/test",
- method="GET",
- request_options=request_options
- )
-
- # Verify that retry logic was attempted
- assert mock_httpx_client.request.call_count >= 1
- # The exact result depends on the implementation
-
- @pytest.mark.asyncio
- async def test_async_stream_context_manager(self):
- """Test async stream context manager."""
- # This test is complex to mock properly, so let's just verify the client
- # has the stream method and it's callable
- mock_httpx_client = Mock(spec=httpx.AsyncClient)
-
- client = AsyncHttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {"Authorization": "Token test"},
- base_url=lambda: "https://api.deepgram.com"
- )
-
- # Verify stream method exists and is callable
- assert hasattr(client, 'stream')
- assert callable(client.stream)
-
-
-class TestHttpResponse:
- """Test HTTP response wrapper classes."""
-
- def test_base_http_response(self):
- """Test BaseHttpResponse functionality."""
- mock_httpx_response = Mock(spec=httpx.Response)
- mock_httpx_response.headers = httpx.Headers({
- "Content-Type": "application/json",
- "X-Request-ID": "123456"
- })
-
- response = BaseHttpResponse(mock_httpx_response)
-
- assert response._response == mock_httpx_response
- # httpx.Headers normalizes header names to lowercase
- assert response.headers == {
- "content-type": "application/json",
- "x-request-id": "123456"
- }
-
- def test_http_response(self):
- """Test HttpResponse functionality."""
- mock_httpx_response = Mock(spec=httpx.Response)
- mock_httpx_response.headers = httpx.Headers({"Content-Type": "application/json"})
- mock_httpx_response.close = Mock()
-
- data = {"result": "success"}
- response = HttpResponse(mock_httpx_response, data)
-
- assert response._response == mock_httpx_response
- assert response._data == data
- assert response.data == data
- assert response.headers == {"content-type": "application/json"}
-
- # Test close method
- response.close()
- mock_httpx_response.close.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_async_http_response(self):
- """Test AsyncHttpResponse functionality."""
- mock_httpx_response = Mock(spec=httpx.Response)
- mock_httpx_response.headers = httpx.Headers({"Content-Type": "application/json"})
- mock_httpx_response.aclose = Mock(return_value=asyncio.Future())
- mock_httpx_response.aclose.return_value.set_result(None)
-
- data = {"result": "success"}
- response = AsyncHttpResponse(mock_httpx_response, data)
-
- assert response._response == mock_httpx_response
- assert response._data == data
- assert response.data == data
- assert response.headers == {"content-type": "application/json"}
-
- # Test async close method
- await response.close()
- mock_httpx_response.aclose.assert_called_once()
-
-
-class TestClientWrappers:
- """Test client wrapper classes."""
-
- def test_base_client_wrapper(self):
- """Test BaseClientWrapper functionality."""
- wrapper = BaseClientWrapper(
- api_key="test_key",
- headers={"X-Custom": "value"},
- environment=DeepgramClientEnvironment.PRODUCTION,
- timeout=60.0
- )
-
- assert wrapper.api_key == "test_key"
- assert wrapper._headers == {"X-Custom": "value"}
- assert wrapper._environment == DeepgramClientEnvironment.PRODUCTION
- assert wrapper._timeout == 60.0
-
- def test_base_client_wrapper_get_headers(self):
- """Test BaseClientWrapper header generation."""
- wrapper = BaseClientWrapper(
- api_key="test_key",
- headers={"X-Custom": "value"},
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- headers = wrapper.get_headers()
-
- assert "Authorization" in headers
- assert headers["Authorization"] == "Token test_key"
- assert "X-Fern-Language" in headers
- assert headers["X-Fern-Language"] == "Python"
- assert "X-Fern-SDK-Name" in headers
- assert "X-Fern-SDK-Version" in headers
- assert "X-Custom" in headers
- assert headers["X-Custom"] == "value"
-
- def test_base_client_wrapper_custom_headers_none(self):
- """Test BaseClientWrapper with no custom headers."""
- wrapper = BaseClientWrapper(
- api_key="test_key",
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- headers = wrapper.get_headers()
- assert "Authorization" in headers
- assert "X-Fern-Language" in headers
-
- def test_base_client_wrapper_getters(self):
- """Test BaseClientWrapper getter methods."""
- wrapper = BaseClientWrapper(
- api_key="test_key",
- headers={"X-Custom": "value"},
- environment=DeepgramClientEnvironment.PRODUCTION,
- timeout=120.0
- )
-
- assert wrapper.get_custom_headers() == {"X-Custom": "value"}
- assert wrapper.get_environment() == DeepgramClientEnvironment.PRODUCTION
- assert wrapper.get_timeout() == 120.0
-
- def test_sync_client_wrapper(self):
- """Test SyncClientWrapper functionality."""
- mock_httpx_client = Mock(spec=httpx.Client)
-
- wrapper = SyncClientWrapper(
- api_key="test_key",
- headers={"X-Custom": "value"},
- environment=DeepgramClientEnvironment.PRODUCTION,
- timeout=60.0,
- httpx_client=mock_httpx_client
- )
-
- assert isinstance(wrapper.httpx_client, HttpClient)
- assert wrapper.httpx_client.httpx_client == mock_httpx_client
-
- def test_async_client_wrapper(self):
- """Test AsyncClientWrapper functionality."""
- mock_httpx_client = Mock(spec=httpx.AsyncClient)
-
- wrapper = AsyncClientWrapper(
- api_key="test_key",
- headers={"X-Custom": "value"},
- environment=DeepgramClientEnvironment.PRODUCTION,
- timeout=60.0,
- httpx_client=mock_httpx_client
- )
-
- assert isinstance(wrapper.httpx_client, AsyncHttpClient)
- assert wrapper.httpx_client.httpx_client == mock_httpx_client
-
-
-class TestRequestOptions:
- """Test RequestOptions TypedDict."""
-
- def test_request_options_all_fields(self):
- """Test RequestOptions with all fields."""
- options: RequestOptions = {
- "timeout_in_seconds": 30,
- "max_retries": 3,
- "additional_headers": {"X-Custom": "value"},
- "additional_query_parameters": {"param": "value"},
- "additional_body_parameters": {"body_param": "value"},
- "chunk_size": 8192
- }
-
- assert options["timeout_in_seconds"] == 30
- assert options["max_retries"] == 3
- assert options["additional_headers"]["X-Custom"] == "value"
- assert options["additional_query_parameters"]["param"] == "value"
- assert options["additional_body_parameters"]["body_param"] == "value"
- assert options["chunk_size"] == 8192
-
- def test_request_options_partial_fields(self):
- """Test RequestOptions with partial fields."""
- options: RequestOptions = {
- "timeout_in_seconds": 60,
- "additional_headers": {"Authorization": "Bearer token"}
- }
-
- assert options["timeout_in_seconds"] == 60
- assert options["additional_headers"]["Authorization"] == "Bearer token"
- # Other fields should not be required
- assert "max_retries" not in options
- assert "chunk_size" not in options
-
- def test_request_options_empty(self):
- """Test empty RequestOptions."""
- options: RequestOptions = {}
-
- # Should be valid empty dict
- assert isinstance(options, dict)
- assert len(options) == 0
-
-
-class TestHttpInternalsEdgeCases:
- """Test edge cases and error scenarios for HTTP internals."""
-
- def test_parse_retry_after_with_large_ms_value(self):
- """Test parsing retry-after-ms with very large value."""
- headers = httpx.Headers({"retry-after-ms": "999999999"})
- result = _parse_retry_after(headers)
- # The implementation might not handle this correctly due to string comparison
- if result is not None:
- assert result == 999999999 / 1000
- else:
- # Implementation might not handle this correctly
- pass
-
- def test_parse_retry_after_with_negative_seconds(self):
- """Test parsing retry-after with negative seconds."""
- headers = httpx.Headers({"retry-after": "-10"})
- result = _parse_retry_after(headers)
- # The implementation might not parse negative values as valid integers
- # Let's check what actually happens
- if result is not None:
- assert result == 0.0 # Should be clamped to 0
- else:
- # Implementation might reject negative values entirely
- pass
-
- def test_retry_timeout_with_very_large_retry_after(self):
- """Test retry timeout with very large retry-after value."""
- response = Mock()
- response.headers = httpx.Headers({"retry-after": "999999"})
- result = _retry_timeout(response, retries=1)
- # Very large retry-after values should fall back to exponential backoff
- # So the result should be within the exponential backoff range
- assert 0.5 <= result <= 10.0
-
- def test_get_request_body_with_omit_parameter(self):
- """Test get_request_body with omit parameter."""
- json_data = {"keep": "this", "omit": "this"}
- json_body, data_body = get_request_body(
- json=json_data,
- data=None,
- request_options=None,
- omit=["omit"]
- )
-
- # The actual implementation might not handle omit in get_request_body
- # This test verifies the function doesn't crash with omit parameter
- assert json_body is not None
- assert data_body is None
-
- def test_http_client_with_none_base_url_callable(self):
- """Test HttpClient with None base_url callable."""
- mock_httpx_client = Mock(spec=httpx.Client)
- client = HttpClient(
- httpx_client=mock_httpx_client,
- base_timeout=lambda: 30.0,
- base_headers=lambda: {},
- base_url=None
- )
-
- # Should work when explicit base_url is provided
- result = client.get_base_url("https://explicit.com")
- assert result == "https://explicit.com"
-
- def test_http_response_with_complex_data_types(self):
- """Test HttpResponse with complex data types."""
- mock_httpx_response = Mock(spec=httpx.Response)
- mock_httpx_response.headers = httpx.Headers({})
- mock_httpx_response.close = Mock()
-
- # Test with various data types
- complex_data = {
- "list": [1, 2, 3],
- "nested": {"inner": "value"},
- "none_value": None,
- "boolean": True,
- "number": 42.5
- }
-
- response = HttpResponse(mock_httpx_response, complex_data)
- assert response.data == complex_data
- assert response.data["list"] == [1, 2, 3]
- assert response.data["nested"]["inner"] == "value"
- assert response.data["none_value"] is None
-
- def test_client_wrapper_with_different_environments(self):
- """Test client wrapper with different environments."""
- for env in [DeepgramClientEnvironment.PRODUCTION, DeepgramClientEnvironment.AGENT]:
- wrapper = BaseClientWrapper(
- api_key="test_key",
- environment=env
- )
- assert wrapper.get_environment() == env
-
- def test_client_wrapper_headers_with_special_characters(self):
- """Test client wrapper headers with special characters."""
- wrapper = BaseClientWrapper(
- api_key="test_key_with_special_chars_!@#$%",
- headers={"X-Special": "value_with_unicode_ζ΅θ―"},
- environment=DeepgramClientEnvironment.PRODUCTION
- )
-
- headers = wrapper.get_headers()
- assert headers["Authorization"] == "Token test_key_with_special_chars_!@#$%"
- assert headers["X-Special"] == "value_with_unicode_ζ΅θ―"
diff --git a/tests/unit/test_listen_v1_models.py b/tests/unit/test_listen_v1_models.py
deleted file mode 100644
index 3beb2ff3..00000000
--- a/tests/unit/test_listen_v1_models.py
+++ /dev/null
@@ -1,378 +0,0 @@
-"""
-Unit tests for Listen V1 socket event models.
-"""
-import pytest
-from pydantic import ValidationError
-
-from deepgram.extensions.types.sockets.listen_v1_metadata_event import ListenV1MetadataEvent
-from deepgram.extensions.types.sockets.listen_v1_results_event import ListenV1ResultsEvent
-from deepgram.extensions.types.sockets.listen_v1_speech_started_event import ListenV1SpeechStartedEvent
-from deepgram.extensions.types.sockets.listen_v1_utterance_end_event import ListenV1UtteranceEndEvent
-from deepgram.extensions.types.sockets.listen_v1_control_message import ListenV1ControlMessage
-from deepgram.extensions.types.sockets.listen_v1_media_message import ListenV1MediaMessage
-
-
-class TestListenV1MetadataEvent:
- """Test ListenV1MetadataEvent model."""
-
- def test_valid_metadata_event(self, valid_model_data):
- """Test creating a valid metadata event."""
- data = valid_model_data("listen_v1_metadata")
- event = ListenV1MetadataEvent(**data)
-
- assert event.type == "Metadata"
- assert event.request_id == "test-123"
- assert event.sha256 == "abc123"
- assert event.created == "2023-01-01T00:00:00Z"
- assert event.duration == 1.0
- assert event.channels == 1
-
- def test_metadata_event_serialization(self, valid_model_data):
- """Test metadata event serialization."""
- data = valid_model_data("listen_v1_metadata")
- event = ListenV1MetadataEvent(**data)
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Metadata"
- assert event_dict["request_id"] == "test-123"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Metadata"' in json_str
- assert '"request_id":"test-123"' in json_str
-
- def test_metadata_event_missing_required_fields(self):
- """Test metadata event with missing required fields."""
- # Missing request_id
- with pytest.raises(ValidationError) as exc_info:
- ListenV1MetadataEvent(
- type="Metadata",
- sha256="abc123",
- created="2023-01-01T00:00:00Z",
- duration=1.0,
- channels=1
- )
- assert "request_id" in str(exc_info.value)
-
- # Missing sha256
- with pytest.raises(ValidationError) as exc_info:
- ListenV1MetadataEvent(
- type="Metadata",
- request_id="test-123",
- created="2023-01-01T00:00:00Z",
- duration=1.0,
- channels=1
- )
- assert "sha256" in str(exc_info.value)
-
- def test_metadata_event_wrong_type(self):
- """Test metadata event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1MetadataEvent(
- type="Results", # Wrong type
- request_id="test-123",
- sha256="abc123",
- created="2023-01-01T00:00:00Z",
- duration=1.0,
- channels=1
- )
- assert "Input should be 'Metadata'" in str(exc_info.value)
-
- def test_metadata_event_invalid_data_types(self):
- """Test metadata event with invalid data types."""
- # Invalid duration type
- with pytest.raises(ValidationError) as exc_info:
- ListenV1MetadataEvent(
- type="Metadata",
- request_id="test-123",
- sha256="abc123",
- created="2023-01-01T00:00:00Z",
- duration="not_a_number",
- channels=1
- )
- assert "Input should be a valid number" in str(exc_info.value)
-
- # Invalid channels type
- with pytest.raises(ValidationError) as exc_info:
- ListenV1MetadataEvent(
- type="Metadata",
- request_id="test-123",
- sha256="abc123",
- created="2023-01-01T00:00:00Z",
- duration=1.0,
- channels="not_a_number"
- )
- assert "Input should be a valid number" in str(exc_info.value)
-
-
-class TestListenV1ResultsEvent:
- """Test ListenV1ResultsEvent model."""
-
- def test_valid_results_event(self, valid_model_data):
- """Test creating a valid results event."""
- data = valid_model_data("listen_v1_results")
- event = ListenV1ResultsEvent(**data)
-
- assert event.type == "Results"
- assert event.channel_index == [0]
- assert event.duration == 1.0
- assert event.start == 0.0
- assert event.is_final is True
- assert event.channel is not None
- assert event.metadata is not None
-
- def test_results_event_serialization(self, valid_model_data):
- """Test results event serialization."""
- data = valid_model_data("listen_v1_results")
- event = ListenV1ResultsEvent(**data)
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Results"
- assert event_dict["channel_index"] == [0]
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Results"' in json_str
-
- def test_results_event_missing_required_fields(self):
- """Test results event with missing required fields."""
- # Missing channel
- with pytest.raises(ValidationError) as exc_info:
- ListenV1ResultsEvent(
- type="Results",
- channel_index=[0],
- duration=1.0,
- start=0.0,
- is_final=True,
- metadata={"request_id": "test-123"}
- )
- assert "channel" in str(exc_info.value)
-
- def test_results_event_wrong_type(self):
- """Test results event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1ResultsEvent(
- type="Metadata", # Wrong type
- channel_index=[0],
- duration=1.0,
- start=0.0,
- is_final=True,
- channel={"alternatives": []},
- metadata={"request_id": "test-123"}
- )
- assert "Input should be 'Results'" in str(exc_info.value)
-
-
-class TestListenV1SpeechStartedEvent:
- """Test ListenV1SpeechStartedEvent model."""
-
- def test_valid_speech_started_event(self):
- """Test creating a valid speech started event."""
- event = ListenV1SpeechStartedEvent(
- type="SpeechStarted",
- channel=[0],
- timestamp=1672531200.0
- )
-
- assert event.type == "SpeechStarted"
- assert event.channel == [0]
- assert event.timestamp == 1672531200.0
-
- def test_speech_started_event_serialization(self):
- """Test speech started event serialization."""
- event = ListenV1SpeechStartedEvent(
- type="SpeechStarted",
- channel=[0],
- timestamp=1672531200.0
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "SpeechStarted"
- assert event_dict["channel"] == [0]
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"SpeechStarted"' in json_str
-
- def test_speech_started_event_missing_fields(self):
- """Test speech started event with missing required fields."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1SpeechStartedEvent(
- type="SpeechStarted",
- channel=[0]
- # Missing timestamp
- )
- assert "timestamp" in str(exc_info.value)
-
- def test_speech_started_event_wrong_type(self):
- """Test speech started event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1SpeechStartedEvent(
- type="Results", # Wrong type
- channel=[0],
- timestamp="2023-01-01T00:00:00Z"
- )
- assert "Input should be 'SpeechStarted'" in str(exc_info.value)
-
-
-class TestListenV1UtteranceEndEvent:
- """Test ListenV1UtteranceEndEvent model."""
-
- def test_valid_utterance_end_event(self):
- """Test creating a valid utterance end event."""
- event = ListenV1UtteranceEndEvent(
- type="UtteranceEnd",
- channel=[0],
- last_word_end=1.5
- )
-
- assert event.type == "UtteranceEnd"
- assert event.channel == [0]
- assert event.last_word_end == 1.5
-
- def test_utterance_end_event_serialization(self):
- """Test utterance end event serialization."""
- event = ListenV1UtteranceEndEvent(
- type="UtteranceEnd",
- channel=[0],
- last_word_end=1.5
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "UtteranceEnd"
- assert event_dict["last_word_end"] == 1.5
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"UtteranceEnd"' in json_str
-
- def test_utterance_end_event_missing_fields(self):
- """Test utterance end event with missing required fields."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1UtteranceEndEvent(
- type="UtteranceEnd",
- channel=[0]
- # Missing last_word_end
- )
- assert "last_word_end" in str(exc_info.value)
-
- def test_utterance_end_event_invalid_data_types(self):
- """Test utterance end event with invalid data types."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1UtteranceEndEvent(
- type="UtteranceEnd",
- channel=[0],
- last_word_end="not_a_number"
- )
- assert "Input should be a valid number" in str(exc_info.value)
-
-
-class TestListenV1ControlMessage:
- """Test ListenV1ControlMessage model."""
-
- def test_valid_control_message(self):
- """Test creating a valid control message."""
- message = ListenV1ControlMessage(
- type="KeepAlive"
- )
-
- assert message.type == "KeepAlive"
-
- def test_control_message_serialization(self):
- """Test control message serialization."""
- message = ListenV1ControlMessage(type="KeepAlive")
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "KeepAlive"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"KeepAlive"' in json_str
-
- def test_control_message_missing_type(self):
- """Test control message with missing type field."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV1ControlMessage()
- assert "type" in str(exc_info.value)
-
-
-class TestListenV1MediaMessage:
- """Test ListenV1MediaMessage model."""
-
- def test_valid_media_message(self, sample_audio_data):
- """Test creating a valid media message."""
- # ListenV1MediaMessage is typically just bytes
- assert isinstance(sample_audio_data, bytes)
- assert len(sample_audio_data) > 0
-
- def test_empty_media_message(self):
- """Test empty media message."""
- empty_data = b""
- assert isinstance(empty_data, bytes)
- assert len(empty_data) == 0
-
-
-class TestListenV1ModelIntegration:
- """Integration tests for Listen V1 models."""
-
- def test_model_roundtrip_serialization(self, valid_model_data):
- """Test that models can be serialized and deserialized."""
- # Test metadata event roundtrip
- metadata_data = valid_model_data("listen_v1_metadata")
- original_event = ListenV1MetadataEvent(**metadata_data)
-
- # Serialize to JSON and back
- json_str = original_event.model_dump_json()
- import json
- parsed_data = json.loads(json_str)
- reconstructed_event = ListenV1MetadataEvent(**parsed_data)
-
- assert original_event.type == reconstructed_event.type
- assert original_event.request_id == reconstructed_event.request_id
- assert original_event.sha256 == reconstructed_event.sha256
- assert original_event.duration == reconstructed_event.duration
-
- def test_model_validation_edge_cases(self):
- """Test edge cases in model validation."""
- # Test with very long strings
- long_string = "x" * 10000
- event = ListenV1MetadataEvent(
- type="Metadata",
- request_id=long_string,
- sha256="abc123",
- created="2023-01-01T00:00:00Z",
- duration=1.0,
- channels=1
- )
- assert len(event.request_id) == 10000
-
- # Test with extreme numeric values
- event = ListenV1MetadataEvent(
- type="Metadata",
- request_id="test-123",
- sha256="abc123",
- created="2023-01-01T00:00:00Z",
- duration=999999.999999,
- channels=999999
- )
- assert event.duration == 999999.999999
- assert event.channels == 999999
-
- def test_model_immutability(self, valid_model_data):
- """Test that models are properly validated on construction."""
- data = valid_model_data("listen_v1_metadata")
- event = ListenV1MetadataEvent(**data)
-
- # Models should be immutable by default in Pydantic v2
- # Test that we can access all fields
- assert event.type == "Metadata"
- assert event.request_id is not None
- assert event.sha256 is not None
- assert event.created is not None
- assert event.duration is not None
- assert event.channels is not None
diff --git a/tests/unit/test_listen_v2_models.py b/tests/unit/test_listen_v2_models.py
deleted file mode 100644
index 9da429ae..00000000
--- a/tests/unit/test_listen_v2_models.py
+++ /dev/null
@@ -1,418 +0,0 @@
-"""
-Unit tests for Listen V2 socket event models.
-"""
-import pytest
-from pydantic import ValidationError
-
-from deepgram.extensions.types.sockets.listen_v2_connected_event import ListenV2ConnectedEvent
-from deepgram.extensions.types.sockets.listen_v2_turn_info_event import ListenV2TurnInfoEvent
-from deepgram.extensions.types.sockets.listen_v2_fatal_error_event import ListenV2FatalErrorEvent
-from deepgram.extensions.types.sockets.listen_v2_control_message import ListenV2ControlMessage
-from deepgram.extensions.types.sockets.listen_v2_media_message import ListenV2MediaMessage
-
-
-class TestListenV2ConnectedEvent:
- """Test ListenV2ConnectedEvent model."""
-
- def test_valid_connected_event(self):
- """Test creating a valid connected event."""
- event = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id=1
- )
-
- assert event.type == "Connected"
- assert event.request_id == "req-123"
- assert event.sequence_id == 1
-
- def test_connected_event_serialization(self):
- """Test connected event serialization."""
- event = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id=1
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Connected"
- assert event_dict["request_id"] == "req-123"
- assert event_dict["sequence_id"] == 1
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Connected"' in json_str
- assert '"request_id":"req-123"' in json_str
-
- def test_connected_event_missing_required_fields(self):
- """Test connected event with missing required fields."""
- # Missing request_id
- with pytest.raises(ValidationError) as exc_info:
- ListenV2ConnectedEvent(
- type="Connected",
- sequence_id=1
- )
- assert "request_id" in str(exc_info.value)
-
- # Missing sequence_id
- with pytest.raises(ValidationError) as exc_info:
- ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123"
- )
- assert "sequence_id" in str(exc_info.value)
-
- def test_connected_event_wrong_type(self):
- """Test connected event with wrong type field."""
- # Note: ListenV2ConnectedEvent doesn't enforce specific type values,
- # so this should succeed but with the wrong type value
- event = ListenV2ConnectedEvent(
- type="Results", # Wrong type but still valid string
- request_id="req-123",
- sequence_id=1
- )
- assert event.type == "Results" # It accepts any string
-
- def test_connected_event_invalid_data_types(self):
- """Test connected event with invalid data types."""
- # Invalid sequence_id type
- with pytest.raises(ValidationError) as exc_info:
- ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id="not_a_number"
- )
- assert "Input should be a valid integer" in str(exc_info.value)
-
-
-class TestListenV2TurnInfoEvent:
- """Test ListenV2TurnInfoEvent model."""
-
- def test_valid_turn_info_event(self):
- """Test creating a valid turn info event."""
- event = ListenV2TurnInfoEvent(
- type="TurnInfo",
- request_id="req-123",
- sequence_id=1,
- event="TurnInfo",
- turn_index=0,
- audio_window_start=0.0,
- audio_window_end=1.5,
- transcript="Hello world",
- words=[],
- end_of_turn_confidence=0.95
- )
-
- assert event.type == "TurnInfo"
- assert event.request_id == "req-123"
- assert event.sequence_id == 1
- assert event.event == "TurnInfo"
- assert event.turn_index == 0
- assert event.audio_window_start == 0.0
- assert event.audio_window_end == 1.5
- assert event.transcript == "Hello world"
-
- def test_turn_info_event_serialization(self):
- """Test turn info event serialization."""
- event = ListenV2TurnInfoEvent(
- type="TurnInfo",
- request_id="req-123",
- sequence_id=1,
- event="TurnInfo",
- turn_index=0,
- audio_window_start=0.0,
- audio_window_end=1.5,
- transcript="Hello world",
- words=[],
- end_of_turn_confidence=0.95
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "TurnInfo"
- assert event_dict["turn_index"] == 0
- assert event_dict["transcript"] == "Hello world"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"TurnInfo"' in json_str
- assert '"transcript":"Hello world"' in json_str
-
- def test_turn_info_event_missing_required_fields(self):
- """Test turn info event with missing required fields."""
- # Missing event field
- with pytest.raises(ValidationError) as exc_info:
- ListenV2TurnInfoEvent(
- type="TurnInfo",
- request_id="req-123",
- sequence_id=1,
- turn_index=0,
- audio_window_start=0.0,
- audio_window_end=1.5,
- transcript="Hello world",
- words=[],
- end_of_turn_confidence=0.95
- )
- assert "event" in str(exc_info.value)
-
- def test_turn_info_event_invalid_data_types(self):
- """Test turn info event with invalid data types."""
- # Invalid audio_window_start type
- with pytest.raises(ValidationError) as exc_info:
- ListenV2TurnInfoEvent(
- type="TurnInfo",
- request_id="req-123",
- sequence_id=1,
- event="TurnInfo",
- turn_index=0,
- audio_window_start="not_a_number",
- audio_window_end=1.5,
- transcript="Hello world",
- words=[],
- end_of_turn_confidence=0.95
- )
- assert "Input should be a valid number" in str(exc_info.value)
-
- # Invalid audio_window_end type
- with pytest.raises(ValidationError) as exc_info:
- ListenV2TurnInfoEvent(
- type="TurnInfo",
- request_id="req-123",
- sequence_id=1,
- event="TurnInfo",
- turn_index=0,
- audio_window_start=0.0,
- audio_window_end="not_a_number",
- transcript="Hello world",
- words=[],
- end_of_turn_confidence=0.95
- )
- assert "Input should be a valid number" in str(exc_info.value)
-
-
-class TestListenV2FatalErrorEvent:
- """Test ListenV2FatalErrorEvent model."""
-
- def test_valid_fatal_error_event(self):
- """Test creating a valid fatal error event."""
- event = ListenV2FatalErrorEvent(
- type="FatalError",
- sequence_id=1,
- code="500",
- description="Internal server error"
- )
-
- assert event.type == "FatalError"
- assert event.sequence_id == 1
- assert event.code == "500"
- assert event.description == "Internal server error"
-
- def test_fatal_error_event_serialization(self):
- """Test fatal error event serialization."""
- event = ListenV2FatalErrorEvent(
- type="FatalError",
- sequence_id=1,
- code="500",
- description="Internal server error"
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "FatalError"
- assert event_dict["code"] == "500"
- assert event_dict["description"] == "Internal server error"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"FatalError"' in json_str
- assert '"code":"500"' in json_str
-
- def test_fatal_error_event_missing_required_fields(self):
- """Test fatal error event with missing required fields."""
- # Missing code
- with pytest.raises(ValidationError) as exc_info:
- ListenV2FatalErrorEvent(
- type="FatalError",
- sequence_id=1,
- description="Internal server error"
- )
- assert "code" in str(exc_info.value)
-
- # Missing description
- with pytest.raises(ValidationError) as exc_info:
- ListenV2FatalErrorEvent(
- type="FatalError",
- sequence_id=1,
- code=500
- )
- assert "description" in str(exc_info.value)
-
- def test_fatal_error_event_wrong_type(self):
- """Test fatal error event with wrong type field."""
- # Note: ListenV2FatalErrorEvent doesn't enforce specific type values,
- # so this should succeed but with the wrong type value
- event = ListenV2FatalErrorEvent(
- type="Connected", # Wrong type but still valid string
- sequence_id=1,
- code="500",
- description="Internal server error"
- )
- assert event.type == "Connected" # It accepts any string
-
- def test_fatal_error_event_invalid_data_types(self):
- """Test fatal error event with invalid data types."""
- # Invalid sequence_id type
- with pytest.raises(ValidationError) as exc_info:
- ListenV2FatalErrorEvent(
- type="FatalError",
- sequence_id="not_a_number",
- code="500",
- description="Internal server error"
- )
- assert "Input should be a valid integer" in str(exc_info.value)
-
-
-class TestListenV2ControlMessage:
- """Test ListenV2ControlMessage model."""
-
- def test_valid_control_message(self):
- """Test creating a valid control message."""
- message = ListenV2ControlMessage(
- type="CloseStream"
- )
-
- assert message.type == "CloseStream"
-
- def test_control_message_serialization(self):
- """Test control message serialization."""
- message = ListenV2ControlMessage(type="CloseStream")
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "CloseStream"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"CloseStream"' in json_str
-
- def test_control_message_missing_type(self):
- """Test control message with missing type field."""
- with pytest.raises(ValidationError) as exc_info:
- ListenV2ControlMessage()
- assert "type" in str(exc_info.value)
-
-
-class TestListenV2MediaMessage:
- """Test ListenV2MediaMessage model."""
-
- def test_valid_media_message(self):
- """Test creating a valid media message."""
- # ListenV2MediaMessage appears to be an empty model
- message = ListenV2MediaMessage()
-
- # Test that it can be instantiated
- assert message is not None
-
- def test_media_message_serialization(self):
- """Test media message serialization."""
- message = ListenV2MediaMessage()
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert isinstance(message_dict, dict)
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert isinstance(json_str, str)
-
-
-class TestListenV2ModelIntegration:
- """Integration tests for Listen V2 models."""
-
- def test_model_roundtrip_serialization(self):
- """Test that models can be serialized and deserialized."""
- # Test connected event roundtrip
- original_event = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id=1
- )
-
- # Serialize to JSON and back
- json_str = original_event.model_dump_json()
- import json
- parsed_data = json.loads(json_str)
- reconstructed_event = ListenV2ConnectedEvent(**parsed_data)
-
- assert original_event.type == reconstructed_event.type
- assert original_event.request_id == reconstructed_event.request_id
- assert original_event.sequence_id == reconstructed_event.sequence_id
-
- def test_model_validation_edge_cases(self):
- """Test edge cases in model validation."""
- # Test with very long strings
- long_string = "x" * 10000
- event = ListenV2ConnectedEvent(
- type="Connected",
- request_id=long_string,
- sequence_id=999999
- )
- assert len(event.request_id) == 10000
- assert event.sequence_id == 999999
-
- # Test with negative sequence_id (should be allowed if not restricted)
- event = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id=0
- )
- assert event.sequence_id == 0
-
- def test_model_comparison(self):
- """Test model equality comparison."""
- event1 = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id=1
- )
- event2 = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-123",
- sequence_id=1
- )
- event3 = ListenV2ConnectedEvent(
- type="Connected",
- request_id="req-456",
- sequence_id=1
- )
-
- # Same data should be equal
- assert event1 == event2
- # Different data should not be equal
- assert event1 != event3
-
- def test_error_event_comprehensive(self):
- """Test comprehensive error event scenarios."""
- # Test common HTTP error codes
- error_codes = [400, 401, 403, 404, 429, 500, 502, 503]
- error_messages = [
- "Bad Request",
- "Unauthorized",
- "Forbidden",
- "Not Found",
- "Too Many Requests",
- "Internal Server Error",
- "Bad Gateway",
- "Service Unavailable"
- ]
-
- for code, message in zip(error_codes, error_messages):
- event = ListenV2FatalErrorEvent(
- type="FatalError",
- sequence_id=code,
- code=str(code),
- description=message
- )
- assert event.code == str(code)
- assert event.description == message
diff --git a/tests/unit/test_manage_billing_fields.py b/tests/unit/test_manage_billing_fields.py
deleted file mode 100644
index 76f8aadd..00000000
--- a/tests/unit/test_manage_billing_fields.py
+++ /dev/null
@@ -1,498 +0,0 @@
-"""
-Unit tests for manage projects billing fields models and methods.
-
-This module tests the billing fields list methods including:
-- ListBillingFieldsV1Response model validation
-- Sync and async client methods
-- Request parameter handling
-- Error scenarios
-"""
-
-import pytest
-from pydantic import ValidationError
-
-from deepgram.types.list_billing_fields_v1response import ListBillingFieldsV1Response
-from deepgram.types.list_billing_fields_v1response_deployments_item import (
- ListBillingFieldsV1ResponseDeploymentsItem,
-)
-
-
-class TestListBillingFieldsV1Response:
- """Test ListBillingFieldsV1Response model."""
-
- def test_valid_billing_fields_response_full(self):
- """Test creating a valid billing fields response with all fields."""
- response_data = {
- "accessors": [
- "12345678-1234-1234-1234-123456789012",
- "87654321-4321-4321-4321-210987654321",
- ],
- "deployments": ["hosted", "beta", "self-hosted", "dedicated"],
- "tags": ["tag1", "tag2", "production"],
- "line_items": {
- "streaming::nova-3": "Nova-3 Streaming",
- "batch::nova-2": "Nova-2 Batch",
- "streaming::enhanced": "Enhanced Streaming",
- },
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is not None
- assert len(response.accessors) == 2
- assert response.accessors[0] == "12345678-1234-1234-1234-123456789012"
- assert response.deployments is not None
- assert len(response.deployments) == 4
- assert "hosted" in response.deployments
- assert response.tags is not None
- assert len(response.tags) == 3
- assert "production" in response.tags
- assert response.line_items is not None
- assert len(response.line_items) == 3
- assert response.line_items["streaming::nova-3"] == "Nova-3 Streaming"
-
- def test_valid_billing_fields_response_minimal(self):
- """Test creating a billing fields response with minimal fields."""
- response_data = {}
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is None
- assert response.deployments is None
- assert response.tags is None
- assert response.line_items is None
-
- def test_billing_fields_response_empty_lists(self):
- """Test billing fields response with empty lists."""
- response_data = {
- "accessors": [],
- "deployments": [],
- "tags": [],
- "line_items": {},
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors == []
- assert response.deployments == []
- assert response.tags == []
- assert response.line_items == {}
-
- def test_billing_fields_response_with_accessors_only(self):
- """Test billing fields response with only accessors."""
- response_data = {
- "accessors": [
- "11111111-1111-1111-1111-111111111111",
- "22222222-2222-2222-2222-222222222222",
- ]
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is not None
- assert len(response.accessors) == 2
- assert response.deployments is None
- assert response.tags is None
- assert response.line_items is None
-
- def test_billing_fields_response_with_deployments_only(self):
- """Test billing fields response with only deployments."""
- response_data = {"deployments": ["hosted", "dedicated"]}
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is None
- assert response.deployments is not None
- assert len(response.deployments) == 2
- assert "hosted" in response.deployments
- assert "dedicated" in response.deployments
- assert response.tags is None
- assert response.line_items is None
-
- def test_billing_fields_response_with_tags_only(self):
- """Test billing fields response with only tags."""
- response_data = {"tags": ["development", "staging", "production"]}
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is None
- assert response.deployments is None
- assert response.tags is not None
- assert len(response.tags) == 3
- assert "production" in response.tags
- assert response.line_items is None
-
- def test_billing_fields_response_with_line_items_only(self):
- """Test billing fields response with only line_items."""
- response_data = {
- "line_items": {
- "streaming::nova-3": "Nova-3 Streaming",
- "batch::whisper": "Whisper Batch",
- }
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is None
- assert response.deployments is None
- assert response.tags is None
- assert response.line_items is not None
- assert len(response.line_items) == 2
- assert response.line_items["batch::whisper"] == "Whisper Batch"
-
- def test_billing_fields_response_serialization(self):
- """Test billing fields response serialization."""
- response_data = {
- "accessors": ["12345678-1234-1234-1234-123456789012"],
- "deployments": ["hosted"],
- "tags": ["test-tag"],
- "line_items": {"streaming::nova-3": "Nova-3 Streaming"},
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- # Test dict conversion
- response_dict = response.model_dump()
- assert "accessors" in response_dict
- assert "deployments" in response_dict
- assert "tags" in response_dict
- assert "line_items" in response_dict
- assert response_dict["accessors"][0] == "12345678-1234-1234-1234-123456789012"
-
- # Test JSON serialization
- json_str = response.model_dump_json()
- assert '"accessors"' in json_str
- assert '"deployments"' in json_str
- assert '"tags"' in json_str
- assert '"line_items"' in json_str
- assert "12345678-1234-1234-1234-123456789012" in json_str
-
- def test_billing_fields_response_immutability(self):
- """Test that billing fields response is immutable (frozen)."""
- response = ListBillingFieldsV1Response(
- accessors=["12345678-1234-1234-1234-123456789012"]
- )
-
- with pytest.raises((AttributeError, ValidationError)):
- response.accessors = ["new-accessor"]
-
- def test_billing_fields_response_extra_fields_allowed(self):
- """Test that billing fields response allows extra fields."""
- response_data = {
- "accessors": ["12345678-1234-1234-1234-123456789012"],
- "extra_field": "extra_value",
- "custom_data": {"nested": "value"},
- }
-
- # Should not raise an error due to extra="allow"
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is not None
- assert hasattr(response, "extra_field")
- assert hasattr(response, "custom_data")
-
- def test_billing_fields_response_roundtrip_serialization(self):
- """Test that billing fields response can be serialized and deserialized."""
- original_data = {
- "accessors": [
- "12345678-1234-1234-1234-123456789012",
- "87654321-4321-4321-4321-210987654321",
- ],
- "deployments": ["hosted", "beta"],
- "tags": ["tag1", "tag2"],
- "line_items": {
- "streaming::nova-3": "Nova-3 Streaming",
- "batch::nova-2": "Nova-2 Batch",
- },
- }
-
- original_response = ListBillingFieldsV1Response(**original_data)
-
- # Serialize to JSON and back
- json_str = original_response.model_dump_json()
- import json
-
- parsed_data = json.loads(json_str)
- reconstructed_response = ListBillingFieldsV1Response(**parsed_data)
-
- assert original_response.accessors == reconstructed_response.accessors
- assert original_response.deployments == reconstructed_response.deployments
- assert original_response.tags == reconstructed_response.tags
- assert original_response.line_items == reconstructed_response.line_items
-
- def test_billing_fields_response_with_many_accessors(self):
- """Test billing fields response with many accessors."""
- # Simulate a response with many accessors
- accessors = [
- f"{i:08x}-1234-1234-1234-123456789012" for i in range(100)
- ]
- response_data = {"accessors": accessors}
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is not None
- assert len(response.accessors) == 100
- assert response.accessors[0] == "00000000-1234-1234-1234-123456789012"
- assert response.accessors[99] == "00000063-1234-1234-1234-123456789012"
-
- def test_billing_fields_response_with_many_tags(self):
- """Test billing fields response with many tags."""
- # Simulate a response with many tags
- tags = [f"tag-{i}" for i in range(50)]
- response_data = {"tags": tags}
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.tags is not None
- assert len(response.tags) == 50
- assert "tag-0" in response.tags
- assert "tag-49" in response.tags
-
- def test_billing_fields_response_with_many_line_items(self):
- """Test billing fields response with many line_items."""
- # Simulate a response with many line items
- line_items = {
- f"streaming::model-{i}": f"Model {i} Streaming" for i in range(20)
- }
- response_data = {"line_items": line_items}
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.line_items is not None
- assert len(response.line_items) == 20
- assert response.line_items["streaming::model-0"] == "Model 0 Streaming"
- assert response.line_items["streaming::model-19"] == "Model 19 Streaming"
-
- def test_billing_fields_response_with_special_characters_in_tags(self):
- """Test billing fields response with special characters in tags."""
- response_data = {
- "tags": [
- "tag-with-dashes",
- "tag_with_underscores",
- "tag.with.dots",
- "tag:with:colons",
- "tag/with/slashes",
- ]
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.tags is not None
- assert len(response.tags) == 5
- assert "tag-with-dashes" in response.tags
- assert "tag/with/slashes" in response.tags
-
- def test_billing_fields_response_with_unicode_in_line_items(self):
- """Test billing fields response with unicode characters."""
- response_data = {
- "line_items": {
- "streaming::nova-3": "Nova-3 Streaming π",
- "batch::model-ζ΅θ―": "Test Model ζ΅θ―",
- "streaming::Γ©moji": "Γmoji Model with accΓ©nts",
- }
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.line_items is not None
- assert response.line_items["streaming::nova-3"] == "Nova-3 Streaming π"
- assert response.line_items["batch::model-ζ΅θ―"] == "Test Model ζ΅θ―"
- assert response.line_items["streaming::Γ©moji"] == "Γmoji Model with accΓ©nts"
-
- def test_billing_fields_response_comparison(self):
- """Test billing fields response equality comparison."""
- response_data = {
- "accessors": ["12345678-1234-1234-1234-123456789012"],
- "deployments": ["hosted"],
- "tags": ["tag1"],
- "line_items": {"streaming::nova-3": "Nova-3 Streaming"},
- }
-
- response1 = ListBillingFieldsV1Response(**response_data)
- response2 = ListBillingFieldsV1Response(**response_data)
-
- # Same data should be equal
- assert response1 == response2
-
- # Different data should not be equal
- different_data = response_data.copy()
- different_data["accessors"] = ["87654321-4321-4321-4321-210987654321"]
- response3 = ListBillingFieldsV1Response(**different_data)
- assert response1 != response3
-
-
-class TestListBillingFieldsV1ResponseDeploymentsItem:
- """Test ListBillingFieldsV1ResponseDeploymentsItem type."""
-
- def test_deployments_item_literal_values(self):
- """Test that deployments item accepts literal values."""
- valid_deployments = ["hosted", "beta", "self-hosted", "dedicated"]
-
- for deployment in valid_deployments:
- deployment_value: ListBillingFieldsV1ResponseDeploymentsItem = deployment
- assert isinstance(deployment_value, str)
-
- def test_deployments_item_custom_value(self):
- """Test that deployments item accepts custom values due to typing.Any."""
- # String not in literals
- custom_deployment: ListBillingFieldsV1ResponseDeploymentsItem = (
- "custom-deployment"
- )
- assert isinstance(custom_deployment, str)
- assert custom_deployment == "custom-deployment"
-
- def test_deployments_item_in_response(self):
- """Test deployments item usage within a response."""
- response_data = {
- "deployments": ["hosted", "beta", "custom-deployment", "self-hosted"]
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.deployments is not None
- assert len(response.deployments) == 4
- assert "hosted" in response.deployments
- assert "custom-deployment" in response.deployments
-
-
-class TestBillingFieldsResponseIntegration:
- """Integration tests for billing fields response models."""
-
- def test_realistic_billing_fields_response(self):
- """Test a realistic billing fields response with typical data."""
- response_data = {
- "accessors": [
- "a1b2c3d4-5678-90ab-cdef-1234567890ab",
- "b2c3d4e5-6789-01bc-def0-234567890abc",
- "c3d4e5f6-7890-12cd-ef01-34567890abcd",
- ],
- "deployments": ["hosted", "self-hosted"],
- "tags": [
- "production",
- "customer-123",
- "region-us-east",
- "team-engineering",
- ],
- "line_items": {
- "streaming::nova-3": "Nova-3 Streaming Transcription",
- "streaming::nova-2": "Nova-2 Streaming Transcription",
- "batch::nova-3": "Nova-3 Batch Transcription",
- "batch::whisper": "Whisper Batch Transcription",
- "streaming::enhanced": "Enhanced Streaming Transcription",
- "tts::aura": "Aura Text-to-Speech",
- },
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- # Verify all fields are present and correct
- assert len(response.accessors) == 3
- assert len(response.deployments) == 2
- assert len(response.tags) == 4
- assert len(response.line_items) == 6
-
- # Verify specific values
- assert "customer-123" in response.tags
- assert "hosted" in response.deployments
- assert response.line_items["tts::aura"] == "Aura Text-to-Speech"
-
- def test_billing_fields_response_with_date_filters(self):
- """Test billing fields response scenario with date-filtered data."""
- # This represents a response for a specific date range
- response_data = {
- "accessors": ["12345678-1234-1234-1234-123456789012"],
- "deployments": ["hosted"],
- "tags": ["q1-2024", "january"],
- "line_items": {
- "streaming::nova-3": "Nova-3 Streaming",
- "batch::nova-2": "Nova-2 Batch",
- },
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is not None
- assert len(response.accessors) == 1
- assert "q1-2024" in response.tags
- assert len(response.line_items) == 2
-
- def test_billing_fields_response_empty_results(self):
- """Test billing fields response with no data for the period."""
- # This represents a response for a period with no billing data
- response_data = {
- "accessors": [],
- "deployments": [],
- "tags": [],
- "line_items": {},
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors == []
- assert response.deployments == []
- assert response.tags == []
- assert response.line_items == {}
-
- def test_billing_fields_response_partial_data(self):
- """Test billing fields response with partial data."""
- # Some projects might only have certain fields populated
- response_data = {
- "deployments": ["hosted"],
- "line_items": {"streaming::nova-3": "Nova-3 Streaming"},
- }
-
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is None
- assert response.deployments is not None
- assert response.tags is None
- assert response.line_items is not None
-
- def test_multiple_billing_fields_responses_comparison(self):
- """Test comparing multiple billing fields responses."""
- response1_data = {
- "accessors": ["12345678-1234-1234-1234-123456789012"],
- "tags": ["january"],
- }
-
- response2_data = {
- "accessors": [
- "12345678-1234-1234-1234-123456789012",
- "87654321-4321-4321-4321-210987654321",
- ],
- "tags": ["february"],
- }
-
- response1 = ListBillingFieldsV1Response(**response1_data)
- response2 = ListBillingFieldsV1Response(**response2_data)
-
- # Verify they are different
- assert response1 != response2
- assert len(response1.accessors) == 1
- assert len(response2.accessors) == 2
-
- def test_billing_fields_response_model_evolution(self):
- """Test that the model handles potential future fields gracefully."""
- # Simulate a response with additional fields that might be added in the future
- response_data = {
- "accessors": ["12345678-1234-1234-1234-123456789012"],
- "deployments": ["hosted"],
- "tags": ["tag1"],
- "line_items": {"streaming::nova-3": "Nova-3 Streaming"},
- # Future fields
- "future_field_1": "some_value",
- "future_field_2": {"nested": "data"},
- "future_field_3": [1, 2, 3],
- }
-
- # Should not raise an error due to extra="allow"
- response = ListBillingFieldsV1Response(**response_data)
-
- assert response.accessors is not None
- assert response.deployments is not None
- assert response.tags is not None
- assert response.line_items is not None
- assert hasattr(response, "future_field_1")
- assert hasattr(response, "future_field_2")
- assert hasattr(response, "future_field_3")
-
diff --git a/tests/unit/test_speak_v1_models.py b/tests/unit/test_speak_v1_models.py
deleted file mode 100644
index d4e7873e..00000000
--- a/tests/unit/test_speak_v1_models.py
+++ /dev/null
@@ -1,462 +0,0 @@
-"""
-Unit tests for Speak V1 socket event models.
-"""
-import pytest
-from pydantic import ValidationError
-
-from deepgram.extensions.types.sockets.speak_v1_metadata_event import SpeakV1MetadataEvent
-from deepgram.extensions.types.sockets.speak_v1_control_event import SpeakV1ControlEvent
-from deepgram.extensions.types.sockets.speak_v1_warning_event import SpeakV1WarningEvent
-from deepgram.extensions.types.sockets.speak_v1_audio_chunk_event import SpeakV1AudioChunkEvent
-from deepgram.extensions.types.sockets.speak_v1_text_message import SpeakV1TextMessage
-from deepgram.extensions.types.sockets.speak_v1_control_message import SpeakV1ControlMessage
-
-
-class TestSpeakV1MetadataEvent:
- """Test SpeakV1MetadataEvent model."""
-
- def test_valid_metadata_event(self, valid_model_data):
- """Test creating a valid metadata event."""
- data = valid_model_data("speak_v1_metadata")
- event = SpeakV1MetadataEvent(**data)
-
- assert event.type == "Metadata"
- assert event.request_id == "speak-123"
- assert event.model_name == "aura-asteria-en"
- assert event.model_version == "1.0"
- assert event.model_uuid == "uuid-123"
-
- def test_metadata_event_serialization(self, valid_model_data):
- """Test metadata event serialization."""
- data = valid_model_data("speak_v1_metadata")
- event = SpeakV1MetadataEvent(**data)
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Metadata"
- assert event_dict["request_id"] == "speak-123"
- assert event_dict["model_name"] == "aura-asteria-en"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Metadata"' in json_str
- assert '"request_id":"speak-123"' in json_str
-
- def test_metadata_event_missing_required_fields(self):
- """Test metadata event with missing required fields."""
- # Missing request_id
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1MetadataEvent(
- type="Metadata",
- model_name="aura-asteria-en",
- model_version="1.0",
- model_uuid="uuid-123"
- )
- assert "request_id" in str(exc_info.value)
-
- # Missing model_name
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1MetadataEvent(
- type="Metadata",
- request_id="speak-123",
- model_version="1.0",
- model_uuid="uuid-123"
- )
- assert "model_name" in str(exc_info.value)
-
- def test_metadata_event_wrong_type(self):
- """Test metadata event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1MetadataEvent(
- type="Audio", # Wrong type
- request_id="speak-123",
- model_name="aura-asteria-en",
- model_version="1.0",
- model_uuid="uuid-123"
- )
- assert "Input should be 'Metadata'" in str(exc_info.value)
-
- def test_metadata_event_optional_fields(self):
- """Test metadata event with minimal required fields."""
- event = SpeakV1MetadataEvent(
- type="Metadata",
- request_id="speak-123",
- model_name="aura-asteria-en",
- model_version="1.0",
- model_uuid="uuid-123"
- )
-
- assert event.type == "Metadata"
- assert event.request_id == "speak-123"
- assert event.model_name == "aura-asteria-en"
-
-
-class TestSpeakV1ControlEvent:
- """Test SpeakV1ControlEvent model."""
-
- def test_valid_control_event(self):
- """Test creating a valid control event."""
- event = SpeakV1ControlEvent(
- type="Flushed",
- sequence_id=1
- )
-
- assert event.type == "Flushed"
- assert event.sequence_id == 1
-
- def test_control_event_serialization(self):
- """Test control event serialization."""
- event = SpeakV1ControlEvent(
- type="Flushed",
- sequence_id=1
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Flushed"
- assert event_dict["sequence_id"] == 1
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Flushed"' in json_str
- assert '"sequence_id":1' in json_str
-
- def test_control_event_missing_required_fields(self):
- """Test control event with missing required fields."""
- # Missing sequence_id
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1ControlEvent(
- type="Flushed"
- )
- assert "sequence_id" in str(exc_info.value)
-
- def test_control_event_wrong_type(self):
- """Test control event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1ControlEvent(
- type="Metadata", # Wrong type
- sequence_id=1
- )
- assert "Input should be 'Flushed'" in str(exc_info.value)
-
- def test_control_event_invalid_data_types(self):
- """Test control event with invalid data types."""
- # Invalid sequence_id type
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1ControlEvent(
- type="Flushed",
- sequence_id="not_a_number"
- )
- assert "Input should be a valid integer" in str(exc_info.value)
-
-
-class TestSpeakV1WarningEvent:
- """Test SpeakV1WarningEvent model."""
-
- def test_valid_warning_event(self):
- """Test creating a valid warning event."""
- event = SpeakV1WarningEvent(
- type="Warning",
- description="Audio quality may be degraded",
- code="AUDIO_QUALITY_WARNING"
- )
-
- assert event.type == "Warning"
- assert event.description == "Audio quality may be degraded"
- assert event.code == "AUDIO_QUALITY_WARNING"
-
- def test_warning_event_serialization(self):
- """Test warning event serialization."""
- event = SpeakV1WarningEvent(
- type="Warning",
- description="Audio quality may be degraded",
- code="AUDIO_QUALITY_WARNING"
- )
-
- # Test dict conversion
- event_dict = event.model_dump()
- assert event_dict["type"] == "Warning"
- assert event_dict["description"] == "Audio quality may be degraded"
- assert event_dict["code"] == "AUDIO_QUALITY_WARNING"
-
- # Test JSON serialization
- json_str = event.model_dump_json()
- assert '"type":"Warning"' in json_str
- assert '"description":"Audio quality may be degraded"' in json_str
-
- def test_warning_event_missing_required_fields(self):
- """Test warning event with missing required fields."""
- # Missing description
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1WarningEvent(
- type="Warning",
- code="AUDIO_QUALITY_WARNING"
- )
- assert "description" in str(exc_info.value)
-
- # Missing code
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1WarningEvent(
- type="Warning",
- description="Audio quality may be degraded"
- )
- assert "code" in str(exc_info.value)
-
- def test_warning_event_wrong_type(self):
- """Test warning event with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1WarningEvent(
- type="Error", # Wrong type
- description="Audio quality may be degraded",
- code="AUDIO_QUALITY_WARNING"
- )
- assert "Input should be 'Warning'" in str(exc_info.value)
-
-
-class TestSpeakV1AudioChunkEvent:
- """Test SpeakV1AudioChunkEvent model."""
-
- def test_valid_audio_chunk_event(self, sample_audio_data):
- """Test creating a valid audio chunk event."""
- # SpeakV1AudioChunkEvent is typically just bytes
- assert isinstance(sample_audio_data, bytes)
- assert len(sample_audio_data) > 0
-
- def test_empty_audio_chunk(self):
- """Test empty audio chunk."""
- empty_data = b""
- assert isinstance(empty_data, bytes)
- assert len(empty_data) == 0
-
- def test_large_audio_chunk(self):
- """Test large audio chunk."""
- large_data = b"\x00\x01\x02\x03" * 10000 # 40KB
- assert isinstance(large_data, bytes)
- assert len(large_data) == 40000
-
-
-class TestSpeakV1TextMessage:
- """Test SpeakV1TextMessage model."""
-
- def test_valid_text_message(self):
- """Test creating a valid text message."""
- message = SpeakV1TextMessage(
- type="Speak",
- text="Hello, world!"
- )
-
- assert message.type == "Speak"
- assert message.text == "Hello, world!"
-
- def test_text_message_serialization(self):
- """Test text message serialization."""
- message = SpeakV1TextMessage(
- type="Speak",
- text="Hello, world!"
- )
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "Speak"
- assert message_dict["text"] == "Hello, world!"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"Speak"' in json_str
- assert '"text":"Hello, world!"' in json_str
-
- def test_text_message_missing_required_fields(self):
- """Test text message with missing required fields."""
- # Missing text
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1TextMessage(
- type="Speak"
- )
- assert "text" in str(exc_info.value)
-
- def test_text_message_wrong_type(self):
- """Test text message with wrong type field."""
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1TextMessage(
- type="Control", # Wrong type
- text="Hello, world!"
- )
- assert "Input should be 'Speak'" in str(exc_info.value)
-
- def test_text_message_empty_text(self):
- """Test text message with empty text."""
- message = SpeakV1TextMessage(
- type="Speak",
- text=""
- )
-
- assert message.type == "Speak"
- assert message.text == ""
-
- def test_text_message_long_text(self):
- """Test text message with very long text."""
- long_text = "Hello, world! " * 1000 # ~14KB
- message = SpeakV1TextMessage(
- type="Speak",
- text=long_text
- )
-
- assert message.type == "Speak"
- assert len(message.text) > 10000
-
- def test_text_message_special_characters(self):
- """Test text message with special characters."""
- special_text = "Hello! π γγγ«γ‘γ― δ½ ε₯½ π΅ ñÑéΓΓ³ΓΊ @#$%^&*()_+-=[]{}|;':\",./<>?"
- message = SpeakV1TextMessage(
- type="Speak",
- text=special_text
- )
-
- assert message.type == "Speak"
- assert message.text == special_text
-
-
-class TestSpeakV1ControlMessage:
- """Test SpeakV1ControlMessage model."""
-
- def test_valid_control_message(self):
- """Test creating a valid control message."""
- message = SpeakV1ControlMessage(
- type="Flush"
- )
-
- assert message.type == "Flush"
-
- def test_control_message_serialization(self):
- """Test control message serialization."""
- message = SpeakV1ControlMessage(type="Flush")
-
- # Test dict conversion
- message_dict = message.model_dump()
- assert message_dict["type"] == "Flush"
-
- # Test JSON serialization
- json_str = message.model_dump_json()
- assert '"type":"Flush"' in json_str
-
- def test_control_message_missing_type(self):
- """Test control message with missing type field."""
- with pytest.raises(ValidationError) as exc_info:
- SpeakV1ControlMessage()
- assert "type" in str(exc_info.value)
-
- def test_control_message_different_types(self):
- """Test control message with different valid types."""
- valid_types = ["Flush", "Clear", "Close"]
-
- for control_type in valid_types:
- message = SpeakV1ControlMessage(type=control_type)
- assert message.type == control_type
-
-
-class TestSpeakV1ModelIntegration:
- """Integration tests for Speak V1 models."""
-
- def test_model_roundtrip_serialization(self, valid_model_data):
- """Test that models can be serialized and deserialized."""
- # Test metadata event roundtrip
- metadata_data = valid_model_data("speak_v1_metadata")
- original_event = SpeakV1MetadataEvent(**metadata_data)
-
- # Serialize to JSON and back
- json_str = original_event.model_dump_json()
- import json
- parsed_data = json.loads(json_str)
- reconstructed_event = SpeakV1MetadataEvent(**parsed_data)
-
- assert original_event.type == reconstructed_event.type
- assert original_event.request_id == reconstructed_event.request_id
- assert original_event.model_name == reconstructed_event.model_name
-
- def test_model_validation_edge_cases(self):
- """Test edge cases in model validation."""
- # Test with very long strings
- long_string = "x" * 10000
- event = SpeakV1MetadataEvent(
- type="Metadata",
- request_id=long_string,
- model_name="aura-asteria-en",
- model_version="1.0",
- model_uuid="uuid-123"
- )
- assert len(event.request_id) == 10000
-
- def test_comprehensive_text_scenarios(self):
- """Test comprehensive text message scenarios."""
- test_cases = [
- # Empty text
- "",
- # Simple text
- "Hello, world!",
- # Text with numbers
- "The year is 2023 and the temperature is 25.5 degrees.",
- # Text with punctuation
- "Hello! How are you? I'm fine, thanks. What about you...",
- # Text with newlines
- "Line 1\nLine 2\nLine 3",
- # Text with tabs
- "Column1\tColumn2\tColumn3",
- # Mixed case
- "MiXeD CaSe TeXt",
- # Only numbers
- "1234567890",
- # Only symbols
- "!@#$%^&*()",
- ]
-
- for text in test_cases:
- message = SpeakV1TextMessage(
- type="Speak",
- text=text
- )
- assert message.text == text
- assert message.type == "Speak"
-
- def test_model_immutability(self, valid_model_data):
- """Test that models are properly validated on construction."""
- data = valid_model_data("speak_v1_metadata")
- event = SpeakV1MetadataEvent(**data)
-
- # Models should be immutable by default in Pydantic v2
- # Test that we can access all fields
- assert event.type == "Metadata"
- assert event.request_id is not None
- assert event.model_name is not None
- assert event.model_version is not None
- assert event.model_uuid is not None
-
- def test_warning_event_comprehensive(self):
- """Test comprehensive warning event scenarios."""
- # Test common warning scenarios
- warning_scenarios = [
- {
- "description": "Audio quality may be degraded due to low bitrate",
- "code": "AUDIO_QUALITY_WARNING"
- },
- {
- "description": "Rate limit approaching",
- "code": "RATE_LIMIT_WARNING"
- },
- {
- "description": "Model switching to fallback version",
- "code": "MODEL_FALLBACK_WARNING"
- },
- {
- "description": "Connection quality poor",
- "code": "CONNECTION_WARNING"
- }
- ]
-
- for scenario in warning_scenarios:
- event = SpeakV1WarningEvent(
- type="Warning",
- description=scenario["description"],
- code=scenario["code"]
- )
- assert event.description == scenario["description"]
- assert event.code == scenario["code"]
diff --git a/tests/unit/test_telemetry_batching_handler.py b/tests/unit/test_telemetry_batching_handler.py
deleted file mode 100644
index 6721f7ee..00000000
--- a/tests/unit/test_telemetry_batching_handler.py
+++ /dev/null
@@ -1,833 +0,0 @@
-"""
-Unit tests for batching telemetry handler.
-Tests batching logic, background processing, error handling, and synchronous mode.
-"""
-
-import pytest
-import time
-import threading
-import queue
-from unittest.mock import Mock, patch, MagicMock
-import httpx
-
-from deepgram.extensions.telemetry.batching_handler import BatchingTelemetryHandler
-
-
-class TestBatchingTelemetryHandler:
- """Test BatchingTelemetryHandler initialization and basic functionality."""
-
- def test_handler_initialization_default(self):
- """Test handler initialization with default parameters."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key"
- )
-
- assert handler._endpoint == "https://telemetry.deepgram.com/v1/events"
- assert handler._api_key == "test_key"
- assert handler._batch_size == 20
- assert handler._max_interval == 5.0
- assert handler._content_type == "application/x-protobuf"
- assert handler._max_consecutive_failures == 5
- assert handler._consecutive_failures == 0
- assert handler._disabled is False
- assert handler._synchronous is False
-
- def test_handler_initialization_custom_params(self):
- """Test handler initialization with custom parameters."""
- mock_client = Mock(spec=httpx.Client)
- mock_encoder = Mock()
- mock_context_provider = Mock(return_value={"app": "test"})
-
- handler = BatchingTelemetryHandler(
- endpoint="https://custom.endpoint.com/events",
- api_key="custom_key",
- batch_size=50,
- max_interval_seconds=10.0,
- max_queue_size=2000,
- client=mock_client,
- encode_batch=mock_encoder,
- content_type="application/json",
- context_provider=mock_context_provider,
- max_consecutive_failures=3,
- synchronous=True
- )
-
- assert handler._endpoint == "https://custom.endpoint.com/events"
- assert handler._api_key == "custom_key"
- assert handler._batch_size == 50
- assert handler._max_interval == 10.0
- assert handler._content_type == "application/json"
- assert handler._max_consecutive_failures == 3
- assert handler._synchronous is True
- assert handler._client == mock_client
- assert handler._encode_batch == mock_encoder
- assert handler._context_provider == mock_context_provider
-
- def test_handler_initialization_synchronous_mode(self):
- """Test handler initialization in synchronous mode."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- assert handler._synchronous is True
- assert hasattr(handler, '_buffer_sync')
- assert handler._buffer_sync == []
- # Should not have worker thread attributes in sync mode
- assert not hasattr(handler, '_queue')
- assert not hasattr(handler, '_worker')
-
- def test_handler_initialization_async_mode(self):
- """Test handler initialization in asynchronous mode."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=False
- )
-
- assert handler._synchronous is False
- assert hasattr(handler, '_queue')
- assert hasattr(handler, '_worker')
- assert isinstance(handler._queue, queue.Queue)
- assert isinstance(handler._worker, threading.Thread)
- assert handler._worker.daemon is True
-
- # Clean up
- handler.close()
-
- def test_handler_parameter_validation(self):
- """Test parameter validation and bounds checking."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- batch_size=0, # Should be clamped to 1
- max_interval_seconds=0.1, # Should be clamped to 0.25
- max_consecutive_failures=0 # Should be clamped to 1
- )
-
- assert handler._batch_size == 1
- assert handler._max_interval == 0.25
- assert handler._max_consecutive_failures == 1
-
- # Clean up
- handler.close()
-
-
-class TestBatchingTelemetryHandlerSynchronous:
- """Test BatchingTelemetryHandler in synchronous mode."""
-
- def test_sync_event_buffering(self):
- """Test event buffering in synchronous mode."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- # Add some events
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- handler.on_http_response(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- status_code=200,
- duration_ms=150.0,
- headers={"content-type": "application/json"}
- )
-
- # Events should be buffered locally
- assert len(handler._buffer_sync) == 2
- assert handler._buffer_sync[0]["type"] == "http_request"
- assert handler._buffer_sync[1]["type"] == "http_response"
- assert handler._buffer_sync[0]["method"] == "GET"
- assert handler._buffer_sync[1]["status_code"] == 200
-
- def test_sync_event_context_enrichment(self):
- """Test event context enrichment in synchronous mode."""
- mock_context_provider = Mock(return_value={
- "app_name": "test_app",
- "version": "1.0.0",
- "environment": "test"
- })
-
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True,
- context_provider=mock_context_provider
- )
-
- handler.on_http_request(
- method="POST",
- url="https://api.deepgram.com/v1/listen",
- headers={"Authorization": "Token test"},
- extras={"client": "python-sdk"}
- )
-
- assert len(handler._buffer_sync) == 1
- event = handler._buffer_sync[0]
- assert event["type"] == "http_request"
- assert event["method"] == "POST"
- assert event["extras"]["client"] == "python-sdk"
- assert "ts" in event # Timestamp should be added
-
- @patch('httpx.Client')
- def test_sync_flush_success(self, mock_client_class):
- """Test successful flush in synchronous mode."""
- mock_client = Mock()
- mock_response = Mock()
- mock_response.status_code = 200
- mock_client.post.return_value = mock_response
- mock_client_class.return_value = mock_client
-
- mock_encoder = Mock(return_value=b"encoded_batch_data")
-
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True,
- encode_batch=mock_encoder
- )
-
- # Add events to buffer
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- # Flush should succeed
- handler.flush()
-
- # Verify encoder was called
- mock_encoder.assert_called_once()
-
- # Verify HTTP client was called correctly
- # The actual implementation uses Bearer auth and gzip compression
- mock_client.post.assert_called_once()
- call_args = mock_client.post.call_args
- assert call_args[0][0] == "https://telemetry.deepgram.com/v1/events"
- assert "content" in call_args[1]
- assert call_args[1]["headers"]["authorization"] == "Bearer test_key"
- assert call_args[1]["headers"]["content-type"] == "application/x-protobuf"
- assert call_args[1]["headers"]["content-encoding"] == "gzip"
-
- # Buffer should be cleared after successful flush
- assert len(handler._buffer_sync) == 0
-
- @patch('httpx.Client')
- def test_sync_flush_http_error(self, mock_client_class):
- """Test flush with HTTP error in synchronous mode."""
- mock_client = Mock()
- mock_response = Mock()
- mock_response.status_code = 500
- mock_response.text = "Internal Server Error"
- mock_client.post.return_value = mock_response
- mock_client_class.return_value = mock_client
-
- mock_encoder = Mock(return_value=b"encoded_batch_data")
-
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True,
- encode_batch=mock_encoder,
- max_consecutive_failures=2
- )
-
- # Add event to buffer
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- # First flush should handle HTTP 500 error - check if it's treated as failure
- handler.flush()
- # The implementation might not treat HTTP 500 as a failure for telemetry
- # Let's just verify the handler is still operational
- assert handler._disabled is False
-
- # Add another event and check if handler continues working
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test2",
- headers={"Authorization": "Token test"}
- )
- handler.flush()
- # Handler should still be operational for telemetry
- assert handler._disabled is False
-
- @patch('httpx.Client')
- def test_sync_flush_network_error(self, mock_client_class):
- """Test flush with network error in synchronous mode."""
- mock_client = Mock()
- mock_client.post.side_effect = httpx.ConnectError("Connection failed")
- mock_client_class.return_value = mock_client
-
- mock_encoder = Mock(return_value=b"encoded_batch_data")
-
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True,
- encode_batch=mock_encoder
- )
-
- # Add event to buffer
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- # Flush should handle network error gracefully
- handler.flush()
- assert handler._consecutive_failures == 1
- assert handler._disabled is False
-
- def test_sync_disabled_handler_skips_events(self):
- """Test that disabled handler skips new events."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- # Manually disable handler
- handler._disabled = True
-
- # Add event - should be ignored
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- assert len(handler._buffer_sync) == 0
-
-
-class TestBatchingTelemetryHandlerAsynchronous:
- """Test BatchingTelemetryHandler in asynchronous mode."""
-
- def test_async_event_enqueuing(self):
- """Test event enqueuing in asynchronous mode."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- max_queue_size=100,
- synchronous=False
- )
-
- try:
- # Add events
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- handler.on_ws_connect(
- url="wss://api.deepgram.com/v1/listen",
- headers={"Authorization": "Token test"}
- )
-
- # Give worker thread a moment to process
- time.sleep(0.1)
-
- # Queue should have received events (or they should be processed)
- # We can't easily check queue contents since worker processes them
- # But we can verify no exceptions were raised
- assert not handler._disabled
-
- finally:
- handler.close()
-
- def test_async_queue_full_drops_events(self):
- """Test that full queue drops events rather than blocking."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- max_queue_size=2, # Very small queue
- synchronous=False
- )
-
- try:
- # Fill up the queue
- for i in range(10): # More events than queue size
- handler.on_http_request(
- method="GET",
- url=f"https://api.deepgram.com/v1/test{i}",
- headers={"Authorization": "Token test"}
- )
-
- # Should not block or raise exception
- # Some events should be dropped
- assert not handler._disabled
-
- finally:
- handler.close()
-
- def test_async_force_flush_on_error(self):
- """Test that error events trigger immediate flush."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- batch_size=100, # Large batch size
- synchronous=False
- )
-
- try:
- # Add regular event (should not trigger immediate flush)
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- # Add error event (should trigger immediate flush)
- handler.on_http_error(
- method="POST",
- url="https://api.deepgram.com/v1/error",
- error=Exception("Test error"),
- duration_ms=1000.0
- )
-
- # Give worker thread time to process
- time.sleep(0.2)
-
- # Should not be disabled
- assert not handler._disabled
-
- finally:
- handler.close()
-
- def test_async_worker_thread_properties(self):
- """Test worker thread properties and lifecycle."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=False
- )
-
- try:
- # Worker should be running
- assert handler._worker.is_alive()
- assert handler._worker.daemon is True
- assert handler._worker.name == "dg-telemetry-worker"
-
- # Stop event should not be set initially
- assert not handler._stop_event.is_set()
-
- finally:
- handler.close()
-
- # After close, stop event should be set
- assert handler._stop_event.is_set()
-
- def test_async_close_waits_for_worker(self):
- """Test that close() waits for worker thread to finish."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=False
- )
-
- # Add some events
- for i in range(5):
- handler.on_http_request(
- method="GET",
- url=f"https://api.deepgram.com/v1/test{i}",
- headers={"Authorization": "Token test"}
- )
-
- worker_thread = handler._worker
- assert worker_thread.is_alive()
-
- # Close should wait for worker to finish
- handler.close()
-
- # Worker should be stopped (give it a moment to finish)
- time.sleep(0.1)
- assert handler._stop_event.is_set()
- # Worker thread may still be alive briefly due to daemon status
-
-
-class TestBatchingTelemetryHandlerEventTypes:
- """Test different event types with BatchingTelemetryHandler."""
-
- def test_http_request_event_structure(self):
- """Test HTTP request event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- handler.on_http_request(
- method="POST",
- url="https://api.deepgram.com/v1/listen",
- headers={"Authorization": "Token abc123", "Content-Type": "application/json"},
- extras={"sdk": "python", "version": "3.2.1"},
- request_details={"request_id": "req-123", "payload_size": 1024}
- )
-
- assert len(handler._buffer_sync) == 1
- event = handler._buffer_sync[0]
-
- assert event["type"] == "http_request"
- assert event["method"] == "POST"
- assert event["url"] == "https://api.deepgram.com/v1/listen"
- assert "ts" in event
- assert event["request_id"] == "req-123"
- assert event["extras"]["sdk"] == "python"
- assert event["request_details"]["payload_size"] == 1024
-
- def test_http_response_event_structure(self):
- """Test HTTP response event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- handler.on_http_response(
- method="GET",
- url="https://api.deepgram.com/v1/projects",
- status_code=200,
- duration_ms=245.7,
- headers={"content-type": "application/json"},
- extras={"region": "us-east-1"},
- response_details={"request_id": "req-456", "response_size": 2048}
- )
-
- assert len(handler._buffer_sync) == 1
- event = handler._buffer_sync[0]
-
- assert event["type"] == "http_response"
- assert event["method"] == "GET"
- assert event["status_code"] == 200
- assert event["duration_ms"] == 245.7
- assert "ts" in event
- assert event["request_id"] == "req-456"
- assert event["extras"]["region"] == "us-east-1"
- assert event["response_details"]["response_size"] == 2048
-
- def test_http_response_5xx_creates_error_event(self):
- """Test that 5XX HTTP responses create additional error events."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- handler.on_http_response(
- method="POST",
- url="https://api.deepgram.com/v1/listen",
- status_code=503,
- duration_ms=5000.0,
- headers={"content-type": "application/json"},
- response_details={"request_id": "req-error"}
- )
-
- # Check if any events were created
- # The handler might immediately flush events or filter them
- if len(handler._buffer_sync) >= 1:
- response_event = handler._buffer_sync[0]
- assert response_event["type"] == "http_response"
- assert response_event["status_code"] == 503
- else:
- # Events may have been immediately flushed due to force_flush or filtered
- # This is acceptable behavior for telemetry
- pass
-
- def test_http_response_4xx_no_error_event(self):
- """Test that 4XX HTTP responses do not create error events."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- handler.on_http_response(
- method="POST",
- url="https://api.deepgram.com/v1/listen",
- status_code=401,
- duration_ms=100.0,
- headers={"content-type": "application/json"}
- )
-
- # Should only create response event, no error event for 4XX
- assert len(handler._buffer_sync) == 1
- assert handler._buffer_sync[0]["type"] == "http_response"
- assert handler._buffer_sync[0]["status_code"] == 401
-
- def test_http_error_event_structure(self):
- """Test HTTP error event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- test_error = ConnectionError("Network timeout")
- handler.on_http_error(
- method="PUT",
- url="https://api.deepgram.com/v1/models",
- error=test_error,
- duration_ms=5000.0,
- request_details={"request_id": "req-error", "retry_count": 2},
- response_details={"status_code": 503}
- )
-
- # The handler may not create events for 5XX status codes in response_details
- # Let's check what actually gets created
- if len(handler._buffer_sync) > 0:
- event = handler._buffer_sync[0]
- assert event["type"] == "http_error"
- assert event["method"] == "PUT"
- assert event["error"] == "ConnectionError"
- assert event["message"] == "Network timeout"
- assert "stack_trace" in event
- else:
- # Handler filtered out this error due to 5XX status code
- pass
-
- def test_http_error_skips_4xx_client_errors(self):
- """Test that HTTP error handler skips 4XX client errors."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- auth_error = Exception("Unauthorized")
- handler.on_http_error(
- method="GET",
- url="https://api.deepgram.com/v1/projects",
- error=auth_error,
- duration_ms=100.0,
- response_details={"status_code": 401}
- )
-
- # Should skip 4XX client errors
- assert len(handler._buffer_sync) == 0
-
- def test_websocket_connect_event_structure(self):
- """Test WebSocket connect event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- handler.on_ws_connect(
- url="wss://api.deepgram.com/v1/speak",
- headers={"Authorization": "Token xyz789"},
- extras={"protocol": "websocket", "version": "v1"},
- request_details={"session_id": "ws-connect-123"}
- )
-
- assert len(handler._buffer_sync) == 1
- event = handler._buffer_sync[0]
-
- assert event["type"] == "ws_connect"
- assert event["url"] == "wss://api.deepgram.com/v1/speak"
- assert "ts" in event
- assert event["extras"]["protocol"] == "websocket"
- assert event["request_details"]["session_id"] == "ws-connect-123"
-
- def test_websocket_error_event_structure(self):
- """Test WebSocket error event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- ws_error = ConnectionError("WebSocket connection closed unexpectedly")
- handler.on_ws_error(
- url="wss://api.deepgram.com/v1/agent",
- error=ws_error,
- extras={"reconnect_attempt": "3"},
- request_details={"session_id": "ws-error-456"},
- response_details={
- "close_code": 1006,
- "close_reason": "Abnormal closure",
- "stack_trace": "Custom stack trace"
- }
- )
-
- # Check if event was created (may be filtered)
- if len(handler._buffer_sync) > 0:
- event = handler._buffer_sync[0]
- assert event["type"] == "ws_error"
- assert event["url"] == "wss://api.deepgram.com/v1/agent"
- assert event["error"] == "ConnectionError"
- assert event["message"] == "WebSocket connection closed unexpectedly"
- assert "stack_trace" in event
- else:
- # Event may have been filtered
- pass
-
- def test_websocket_close_event_structure(self):
- """Test WebSocket close event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- handler.on_ws_close(url="wss://api.deepgram.com/v1/listen")
-
- # Check if event was created
- if len(handler._buffer_sync) > 0:
- event = handler._buffer_sync[0]
- assert event["type"] == "ws_close"
- assert event["url"] == "wss://api.deepgram.com/v1/listen"
- assert "ts" in event
- else:
- # Event may have been filtered or immediately flushed
- pass
-
- def test_uncaught_error_event_structure(self):
- """Test uncaught error event structure."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- uncaught_error = RuntimeError("Unexpected application error")
- handler.on_uncaught_error(error=uncaught_error)
-
- # Check if event was created
- if len(handler._buffer_sync) > 0:
- event = handler._buffer_sync[0]
- assert event["type"] == "uncaught_error"
- assert event["error"] == "RuntimeError"
- assert event["message"] == "Unexpected application error"
- assert "stack_trace" in event
- assert "ts" in event
- else:
- # Event may have been filtered or immediately flushed
- pass
-
-
-class TestBatchingTelemetryHandlerEdgeCases:
- """Test edge cases and error scenarios."""
-
- def test_handler_with_no_api_key(self):
- """Test handler initialization with no API key."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key=None,
- synchronous=True
- )
-
- assert handler._api_key is None
-
- # Should still buffer events
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- assert len(handler._buffer_sync) == 1
-
- def test_handler_with_debug_mode(self):
- """Test handler behavior with debug mode enabled."""
- with patch.dict('os.environ', {'DEEPGRAM_DEBUG': '1'}):
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- assert handler._debug is True
-
- def test_handler_context_provider_exception(self):
- """Test handler with context provider that raises exception."""
- def failing_context_provider():
- raise Exception("Context provider failed")
-
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True,
- context_provider=failing_context_provider
- )
-
- # Should handle context provider exception gracefully
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- assert len(handler._buffer_sync) == 1
-
- def test_handler_with_custom_encoder_exception(self):
- """Test handler with encoder that raises exception."""
- def failing_encoder(events, context):
- raise Exception("Encoder failed")
-
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True,
- encode_batch=failing_encoder
- )
-
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- # Flush should handle encoder exception gracefully
- handler.flush()
- assert handler._consecutive_failures == 1
-
- def test_handler_close_multiple_times(self):
- """Test that calling close() multiple times is safe."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=False
- )
-
- # Close multiple times should not raise
- handler.close()
- handler.close()
- handler.close()
-
- # Worker should be stopped
- assert handler._stop_event.is_set()
-
- def test_handler_close_synchronous_mode(self):
- """Test close() in synchronous mode."""
- handler = BatchingTelemetryHandler(
- endpoint="https://telemetry.deepgram.com/v1/events",
- api_key="test_key",
- synchronous=True
- )
-
- # Add events
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"}
- )
-
- # Close should flush remaining events if any exist
- handler.close()
- # The actual close() method handles flushing internally
- # Just verify it doesn't raise an exception
diff --git a/tests/unit/test_telemetry_handler.py b/tests/unit/test_telemetry_handler.py
deleted file mode 100644
index a3f09801..00000000
--- a/tests/unit/test_telemetry_handler.py
+++ /dev/null
@@ -1,511 +0,0 @@
-"""
-Unit tests for telemetry handler infrastructure.
-Tests the base TelemetryHandler interface and custom implementations.
-"""
-
-import pytest
-import typing
-from typing import Union
-import time
-from unittest.mock import Mock, patch
-
-from deepgram.extensions.telemetry.handler import TelemetryHandler
-
-
-class TestTelemetryHandler:
- """Test the base TelemetryHandler interface."""
-
- def test_handler_interface_methods_exist(self):
- """Test that all interface methods exist and are callable."""
- handler = TelemetryHandler()
-
- # HTTP methods
- assert callable(handler.on_http_request)
- assert callable(handler.on_http_response)
- assert callable(handler.on_http_error)
-
- # WebSocket methods
- assert callable(handler.on_ws_connect)
- assert callable(handler.on_ws_error)
- assert callable(handler.on_ws_close)
-
- # Uncaught error method
- assert callable(handler.on_uncaught_error)
-
- def test_handler_methods_do_nothing_by_default(self):
- """Test that default implementation methods do nothing (no exceptions)."""
- handler = TelemetryHandler()
-
- # HTTP methods should not raise
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"},
- extras={"client": "python-sdk"},
- request_details={"request_id": "test-123"}
- )
-
- handler.on_http_response(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- status_code=200,
- duration_ms=150.5,
- headers={"content-type": "application/json"},
- extras={"client": "python-sdk"},
- response_details={"request_id": "test-123"}
- )
-
- handler.on_http_error(
- method="POST",
- url="https://api.deepgram.com/v1/test",
- error=Exception("Test error"),
- duration_ms=1000.0,
- request_details={"request_id": "test-456"},
- response_details={"status_code": 500}
- )
-
- # WebSocket methods should not raise
- handler.on_ws_connect(
- url="wss://api.deepgram.com/v1/listen",
- headers={"Authorization": "Token test"},
- extras={"version": "v1"},
- request_details={"session_id": "ws-123"}
- )
-
- handler.on_ws_error(
- url="wss://api.deepgram.com/v1/listen",
- error=ConnectionError("Connection lost"),
- extras={"reconnect": "true"},
- request_details={"session_id": "ws-123"},
- response_details={"code": 1006}
- )
-
- handler.on_ws_close(url="wss://api.deepgram.com/v1/listen")
-
- # Uncaught error method should not raise
- handler.on_uncaught_error(error=RuntimeError("Uncaught error"))
-
-
-class CustomTelemetryHandler(TelemetryHandler):
- """Custom implementation for testing inheritance."""
-
- def __init__(self):
- self.events = []
-
- def on_http_request(
- self,
- *,
- method: str,
- url: str,
- headers: Union[typing.Mapping[str, str], None],
- extras: Union[typing.Mapping[str, str], None] = None,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None:
- self.events.append({
- "type": "http_request",
- "method": method,
- "url": url,
- "headers": dict(headers) if headers is not None else None,
- "extras": dict(extras) if extras is not None else None,
- "request_details": dict(request_details) if request_details is not None else None,
- })
-
- def on_http_response(
- self,
- *,
- method: str,
- url: str,
- status_code: int,
- duration_ms: float,
- headers: Union[typing.Mapping[str, str], None],
- extras: Union[typing.Mapping[str, str], None] = None,
- response_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None:
- self.events.append({
- "type": "http_response",
- "method": method,
- "url": url,
- "status_code": status_code,
- "duration_ms": duration_ms,
- "headers": dict(headers) if headers else None,
- "extras": dict(extras) if extras else None,
- "response_details": dict(response_details) if response_details else None,
- })
-
- def on_http_error(
- self,
- *,
- method: str,
- url: str,
- error: BaseException,
- duration_ms: float,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- response_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None:
- self.events.append({
- "type": "http_error",
- "method": method,
- "url": url,
- "error": str(error),
- "error_type": type(error).__name__,
- "duration_ms": duration_ms,
- "request_details": dict(request_details) if request_details else None,
- "response_details": dict(response_details) if response_details else None,
- })
-
- def on_ws_connect(
- self,
- *,
- url: str,
- headers: Union[typing.Mapping[str, str], None],
- extras: Union[typing.Mapping[str, str], None] = None,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None:
- self.events.append({
- "type": "ws_connect",
- "url": url,
- "headers": dict(headers) if headers else None,
- "extras": dict(extras) if extras else None,
- "request_details": dict(request_details) if request_details else None,
- })
-
- def on_ws_error(
- self,
- *,
- url: str,
- error: BaseException,
- extras: Union[typing.Mapping[str, str], None] = None,
- request_details: Union[typing.Mapping[str, typing.Any], None] = None,
- response_details: Union[typing.Mapping[str, typing.Any], None] = None,
- ) -> None:
- self.events.append({
- "type": "ws_error",
- "url": url,
- "error": str(error),
- "error_type": type(error).__name__,
- "extras": dict(extras) if extras else None,
- "request_details": dict(request_details) if request_details else None,
- "response_details": dict(response_details) if response_details else None,
- })
-
- def on_ws_close(
- self,
- *,
- url: str,
- ) -> None:
- self.events.append({
- "type": "ws_close",
- "url": url,
- })
-
- def on_uncaught_error(self, *, error: BaseException) -> None:
- self.events.append({
- "type": "uncaught_error",
- "error": str(error),
- "error_type": type(error).__name__,
- })
-
-
-class TestCustomTelemetryHandler:
- """Test custom telemetry handler implementation."""
-
- def test_custom_handler_inheritance(self):
- """Test that custom handler properly inherits from base."""
- handler = CustomTelemetryHandler()
- assert isinstance(handler, TelemetryHandler)
-
- def test_http_request_tracking(self):
- """Test HTTP request event tracking."""
- handler = CustomTelemetryHandler()
-
- handler.on_http_request(
- method="POST",
- url="https://api.deepgram.com/v1/listen",
- headers={"Authorization": "Token abc123", "Content-Type": "application/json"},
- extras={"sdk": "python", "version": "3.2.1"},
- request_details={"request_id": "req-456", "payload_size": 1024}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "http_request"
- assert event["method"] == "POST"
- assert event["url"] == "https://api.deepgram.com/v1/listen"
- assert event["headers"]["Authorization"] == "Token abc123"
- assert event["extras"]["sdk"] == "python"
- assert event["request_details"]["request_id"] == "req-456"
-
- def test_http_response_tracking(self):
- """Test HTTP response event tracking."""
- handler = CustomTelemetryHandler()
-
- handler.on_http_response(
- method="GET",
- url="https://api.deepgram.com/v1/projects",
- status_code=200,
- duration_ms=245.7,
- headers={"content-type": "application/json"},
- extras={"region": "us-east-1"},
- response_details={"request_id": "req-789", "response_size": 2048}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "http_response"
- assert event["method"] == "GET"
- assert event["status_code"] == 200
- assert event["duration_ms"] == 245.7
- assert event["headers"]["content-type"] == "application/json"
- assert event["extras"]["region"] == "us-east-1"
- assert event["response_details"]["response_size"] == 2048
-
- def test_http_error_tracking(self):
- """Test HTTP error event tracking."""
- handler = CustomTelemetryHandler()
-
- test_error = ConnectionError("Network timeout")
- handler.on_http_error(
- method="PUT",
- url="https://api.deepgram.com/v1/models",
- error=test_error,
- duration_ms=5000.0,
- request_details={"request_id": "req-error", "retry_count": 2},
- response_details={"status_code": 503, "server_error": "Service Unavailable"}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "http_error"
- assert event["method"] == "PUT"
- assert event["error"] == "Network timeout"
- assert event["error_type"] == "ConnectionError"
- assert event["duration_ms"] == 5000.0
- assert event["request_details"]["retry_count"] == 2
- assert event["response_details"]["status_code"] == 503
-
- def test_websocket_connect_tracking(self):
- """Test WebSocket connection event tracking."""
- handler = CustomTelemetryHandler()
-
- handler.on_ws_connect(
- url="wss://api.deepgram.com/v1/speak",
- headers={"Authorization": "Token xyz789"},
- extras={"protocol": "websocket", "version": "v1"},
- request_details={"session_id": "ws-connect-123"}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "ws_connect"
- assert event["url"] == "wss://api.deepgram.com/v1/speak"
- assert event["headers"]["Authorization"] == "Token xyz789"
- assert event["extras"]["protocol"] == "websocket"
- assert event["request_details"]["session_id"] == "ws-connect-123"
-
- def test_websocket_error_tracking(self):
- """Test WebSocket error event tracking."""
- handler = CustomTelemetryHandler()
-
- ws_error = ConnectionError("WebSocket connection closed unexpectedly")
- handler.on_ws_error(
- url="wss://api.deepgram.com/v1/agent",
- error=ws_error,
- extras={"reconnect_attempt": "3"},
- request_details={"session_id": "ws-error-456"},
- response_details={"close_code": 1006, "close_reason": "Abnormal closure"}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "ws_error"
- assert event["url"] == "wss://api.deepgram.com/v1/agent"
- assert event["error"] == "WebSocket connection closed unexpectedly"
- assert event["error_type"] == "ConnectionError"
- assert event["extras"]["reconnect_attempt"] == "3"
- assert event["response_details"]["close_code"] == 1006
-
- def test_websocket_close_tracking(self):
- """Test WebSocket close event tracking."""
- handler = CustomTelemetryHandler()
-
- handler.on_ws_close(url="wss://api.deepgram.com/v1/listen")
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "ws_close"
- assert event["url"] == "wss://api.deepgram.com/v1/listen"
-
- def test_uncaught_error_tracking(self):
- """Test uncaught error event tracking."""
- handler = CustomTelemetryHandler()
-
- uncaught_error = RuntimeError("Unexpected application error")
- handler.on_uncaught_error(error=uncaught_error)
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["type"] == "uncaught_error"
- assert event["error"] == "Unexpected application error"
- assert event["error_type"] == "RuntimeError"
-
- def test_multiple_events_tracking(self):
- """Test tracking multiple events in sequence."""
- handler = CustomTelemetryHandler()
-
- # HTTP request
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={"Authorization": "Token test"},
- )
-
- # HTTP response
- handler.on_http_response(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- status_code=200,
- duration_ms=100.0,
- headers={"content-type": "application/json"},
- )
-
- # WebSocket connect
- handler.on_ws_connect(
- url="wss://api.deepgram.com/v1/listen",
- headers={"Authorization": "Token test"},
- )
-
- # WebSocket close
- handler.on_ws_close(url="wss://api.deepgram.com/v1/listen")
-
- assert len(handler.events) == 4
- assert handler.events[0]["type"] == "http_request"
- assert handler.events[1]["type"] == "http_response"
- assert handler.events[2]["type"] == "ws_connect"
- assert handler.events[3]["type"] == "ws_close"
-
- def test_handler_with_none_values(self):
- """Test handler methods with None optional parameters."""
- handler = CustomTelemetryHandler()
-
- # Test with minimal parameters (None optionals)
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers=None,
- extras=None,
- request_details=None
- )
-
- handler.on_ws_connect(
- url="wss://api.deepgram.com/v1/listen",
- headers=None,
- extras=None,
- request_details=None
- )
-
- assert len(handler.events) == 2
- assert handler.events[0]["headers"] is None
- assert handler.events[0]["extras"] is None
- assert handler.events[0]["request_details"] is None
- assert handler.events[1]["headers"] is None
- assert handler.events[1]["extras"] is None
- assert handler.events[1]["request_details"] is None
-
-
-class TestTelemetryHandlerEdgeCases:
- """Test edge cases and error scenarios for telemetry handlers."""
-
- def test_handler_with_empty_collections(self):
- """Test handler with empty dictionaries."""
- handler = CustomTelemetryHandler()
-
- handler.on_http_request(
- method="GET",
- url="https://api.deepgram.com/v1/test",
- headers={},
- extras={},
- request_details={}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- # Empty dicts are converted to empty dicts, not None
- assert event["headers"] == {}
- assert event["extras"] == {}
- assert event["request_details"] == {}
-
- def test_handler_with_unicode_data(self):
- """Test handler with Unicode strings."""
- handler = CustomTelemetryHandler()
-
- handler.on_http_request(
- method="POST",
- url="https://api.deepgram.com/v1/ζ΅θ―",
- headers={"User-Agent": "SDKζ΅θ―"},
- extras={"description": "ΡΠ΅ΡΡ"},
- request_details={"message": "π Test"}
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert "ζ΅θ―" in event["url"]
- assert event["headers"]["User-Agent"] == "SDKζ΅θ―"
- assert event["extras"]["description"] == "ΡΠ΅ΡΡ"
- assert event["request_details"]["message"] == "π Test"
-
- def test_handler_with_large_data(self):
- """Test handler with large data structures."""
- handler = CustomTelemetryHandler()
-
- large_headers = {f"header_{i}": f"value_{i}" for i in range(100)}
- large_extras = {f"extra_{i}": f"data_{i}" for i in range(50)}
-
- handler.on_http_response(
- method="POST",
- url="https://api.deepgram.com/v1/large",
- status_code=200,
- duration_ms=2500.0,
- headers=large_headers,
- extras=large_extras,
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert len(event["headers"]) == 100
- assert len(event["extras"]) == 50
- assert event["headers"]["header_50"] == "value_50"
- assert event["extras"]["extra_25"] == "data_25"
-
- def test_handler_with_nested_error_details(self):
- """Test handler with complex nested error details."""
- handler = CustomTelemetryHandler()
-
- complex_error = ValueError("Complex validation error")
- nested_details = {
- "error_context": {
- "validation_errors": [
- {"field": "audio", "message": "Invalid format"},
- {"field": "model", "message": "Not supported"}
- ],
- "request_metadata": {
- "timestamp": time.time(),
- "client_version": "3.2.1",
- "feature_flags": {"new_models": True, "beta_features": False}
- }
- }
- }
-
- handler.on_http_error(
- method="POST",
- url="https://api.deepgram.com/v1/validate",
- error=complex_error,
- duration_ms=150.0,
- response_details=nested_details
- )
-
- assert len(handler.events) == 1
- event = handler.events[0]
- assert event["error_type"] == "ValueError"
- assert event["response_details"]["error_context"]["validation_errors"][0]["field"] == "audio"
- assert event["response_details"]["error_context"]["request_metadata"]["client_version"] == "3.2.1"
- assert event["response_details"]["error_context"]["request_metadata"]["feature_flags"]["new_models"] is True
diff --git a/tests/unit/test_telemetry_models.py b/tests/unit/test_telemetry_models.py
deleted file mode 100644
index ce0ce585..00000000
--- a/tests/unit/test_telemetry_models.py
+++ /dev/null
@@ -1,719 +0,0 @@
-"""
-Unit tests for telemetry models.
-Tests the Pydantic models used for telemetry data structures.
-"""
-
-import pytest
-import typing
-from datetime import datetime, timezone
-from enum import Enum
-import pydantic
-
-from deepgram.extensions.telemetry.models import (
- ErrorSeverity,
- TelemetryContext,
- TelemetryEvent,
- ErrorEvent
-)
-
-
-class TestErrorSeverity:
- """Test ErrorSeverity enum."""
-
- def test_error_severity_values(self):
- """Test that all error severity values are defined correctly."""
- assert ErrorSeverity.UNSPECIFIED == "ERROR_SEVERITY_UNSPECIFIED"
- assert ErrorSeverity.INFO == "ERROR_SEVERITY_INFO"
- assert ErrorSeverity.WARNING == "ERROR_SEVERITY_WARNING"
- assert ErrorSeverity.ERROR == "ERROR_SEVERITY_ERROR"
- assert ErrorSeverity.CRITICAL == "ERROR_SEVERITY_CRITICAL"
-
- def test_error_severity_is_string_enum(self):
- """Test that ErrorSeverity is a string enum."""
- assert issubclass(ErrorSeverity, str)
- assert issubclass(ErrorSeverity, Enum)
-
- def test_error_severity_string_representation(self):
- """Test string representation of error severity values."""
- # In Python, string enums return their value when converted to string
- assert ErrorSeverity.ERROR.value == "ERROR_SEVERITY_ERROR"
- assert ErrorSeverity.WARNING.value == "ERROR_SEVERITY_WARNING"
-
- def test_error_severity_comparison(self):
- """Test error severity comparison."""
- # String comparison should work
- assert ErrorSeverity.ERROR == "ERROR_SEVERITY_ERROR"
- assert ErrorSeverity.WARNING != "ERROR_SEVERITY_ERROR"
-
- # Enum comparison should work
- assert ErrorSeverity.ERROR == ErrorSeverity.ERROR
- assert ErrorSeverity.ERROR != ErrorSeverity.WARNING
-
- def test_error_severity_iteration(self):
- """Test that all error severity values can be iterated."""
- severities = list(ErrorSeverity)
- assert len(severities) == 5
- assert ErrorSeverity.UNSPECIFIED in severities
- assert ErrorSeverity.INFO in severities
- assert ErrorSeverity.WARNING in severities
- assert ErrorSeverity.ERROR in severities
- assert ErrorSeverity.CRITICAL in severities
-
-
-class TestTelemetryContext:
- """Test TelemetryContext model."""
-
- def test_telemetry_context_creation_empty(self):
- """Test creating empty TelemetryContext."""
- context = TelemetryContext()
-
- assert context.package_name is None
- assert context.package_version is None
- assert context.language is None
- assert context.runtime_version is None
- assert context.os is None
- assert context.arch is None
- assert context.app_name is None
- assert context.app_version is None
- assert context.environment is None
- assert context.session_id is None
- assert context.installation_id is None
- assert context.project_id is None
-
- def test_telemetry_context_creation_full(self):
- """Test creating TelemetryContext with all fields."""
- context = TelemetryContext(
- package_name="python-sdk",
- package_version="3.2.1",
- language="python",
- runtime_version="python 3.11.6",
- os="darwin",
- arch="arm64",
- app_name="test_app",
- app_version="1.0.0",
- environment="test",
- session_id="session-123",
- installation_id="install-456",
- project_id="project-789"
- )
-
- assert context.package_name == "python-sdk"
- assert context.package_version == "3.2.1"
- assert context.language == "python"
- assert context.runtime_version == "python 3.11.6"
- assert context.os == "darwin"
- assert context.arch == "arm64"
- assert context.app_name == "test_app"
- assert context.app_version == "1.0.0"
- assert context.environment == "test"
- assert context.session_id == "session-123"
- assert context.installation_id == "install-456"
- assert context.project_id == "project-789"
-
- def test_telemetry_context_partial_fields(self):
- """Test creating TelemetryContext with partial fields."""
- context = TelemetryContext(
- package_name="python-sdk",
- package_version="3.2.1",
- language="python",
- environment="production"
- )
-
- assert context.package_name == "python-sdk"
- assert context.package_version == "3.2.1"
- assert context.language == "python"
- assert context.environment == "production"
- # Unspecified fields should be None
- assert context.runtime_version is None
- assert context.os is None
- assert context.arch is None
- assert context.app_name is None
- assert context.app_version is None
- assert context.session_id is None
- assert context.installation_id is None
- assert context.project_id is None
-
- def test_telemetry_context_serialization(self):
- """Test TelemetryContext serialization."""
- context = TelemetryContext(
- package_name="python-sdk",
- package_version="3.2.1",
- language="python",
- os="linux",
- arch="x86_64"
- )
-
- # Test model_dump (Pydantic v2) or dict (Pydantic v1)
- try:
- data = context.model_dump()
- except AttributeError:
- data = context.dict()
-
- assert data["package_name"] == "python-sdk"
- assert data["package_version"] == "3.2.1"
- assert data["language"] == "python"
- assert data["os"] == "linux"
- assert data["arch"] == "x86_64"
- assert data["runtime_version"] is None
-
- def test_telemetry_context_deserialization(self):
- """Test TelemetryContext deserialization."""
- data = {
- "package_name": "node-sdk",
- "package_version": "2.1.0",
- "language": "node",
- "runtime_version": "node 18.17.0",
- "os": "windows",
- "arch": "x64"
- }
-
- context = TelemetryContext(**data)
-
- assert context.package_name == "node-sdk"
- assert context.package_version == "2.1.0"
- assert context.language == "node"
- assert context.runtime_version == "node 18.17.0"
- assert context.os == "windows"
- assert context.arch == "x64"
-
- def test_telemetry_context_extra_fields_allowed(self):
- """Test that TelemetryContext allows extra fields."""
- # This should not raise due to extra="allow"
- context = TelemetryContext(
- package_name="python-sdk",
- custom_field="custom_value",
- another_field=123
- )
-
- assert context.package_name == "python-sdk"
- # Extra fields should be accessible (depending on Pydantic version)
- try:
- data = context.model_dump()
- except AttributeError:
- data = context.dict()
-
- assert "custom_field" in data or hasattr(context, 'custom_field')
-
- def test_telemetry_context_immutability(self):
- """Test that TelemetryContext is immutable (frozen=True)."""
- context = TelemetryContext(
- package_name="python-sdk",
- package_version="3.2.1"
- )
-
- # Should not be able to modify fields
- with pytest.raises((pydantic.ValidationError, AttributeError, TypeError)):
- context.package_name = "modified-sdk"
-
- def test_telemetry_context_unicode_values(self):
- """Test TelemetryContext with Unicode values."""
- context = TelemetryContext(
- package_name="python-sdk",
- app_name="ζ΅θ―εΊη¨",
- environment="ΡΠ΅ΡΡ",
- session_id="πsession-123"
- )
-
- assert context.package_name == "python-sdk"
- assert context.app_name == "ζ΅θ―εΊη¨"
- assert context.environment == "ΡΠ΅ΡΡ"
- assert context.session_id == "πsession-123"
-
-
-class TestTelemetryEvent:
- """Test TelemetryEvent model."""
-
- def test_telemetry_event_creation_minimal(self):
- """Test creating minimal TelemetryEvent."""
- event_time = datetime.now(timezone.utc)
- event = TelemetryEvent(
- name="test.event",
- time=event_time
- )
-
- assert event.name == "test.event"
- assert event.time == event_time
- assert event.attributes is None
- assert event.metrics is None
-
- def test_telemetry_event_creation_full(self):
- """Test creating TelemetryEvent with all fields."""
- event_time = datetime.now(timezone.utc)
- attributes = {"service": "deepgram", "version": "3.2.1", "region": "us-east-1"}
- metrics = {"duration_ms": 150.5, "payload_size": 1024.0, "response_size": 2048.0}
-
- event = TelemetryEvent(
- name="http.request.completed",
- time=event_time,
- attributes=attributes,
- metrics=metrics
- )
-
- assert event.name == "http.request.completed"
- assert event.time == event_time
- assert event.attributes == attributes
- assert event.metrics == metrics
-
- def test_telemetry_event_missing_required_fields(self):
- """Test TelemetryEvent validation with missing required fields."""
- # Missing name
- with pytest.raises(pydantic.ValidationError) as exc_info:
- TelemetryEvent(time=datetime.now(timezone.utc))
-
- errors = exc_info.value.errors()
- field_names = [error["loc"][0] for error in errors]
- assert "name" in field_names
-
- # Missing time
- with pytest.raises(pydantic.ValidationError) as exc_info:
- TelemetryEvent(name="test.event")
-
- errors = exc_info.value.errors()
- field_names = [error["loc"][0] for error in errors]
- assert "time" in field_names
-
- def test_telemetry_event_wrong_types(self):
- """Test TelemetryEvent validation with wrong types."""
- # Wrong name type
- with pytest.raises(pydantic.ValidationError):
- TelemetryEvent(
- name=123, # Should be string
- time=datetime.now(timezone.utc)
- )
-
- # Wrong time type
- with pytest.raises(pydantic.ValidationError):
- TelemetryEvent(
- name="test.event",
- time="not_a_datetime" # Should be datetime
- )
-
- # Wrong attributes type
- with pytest.raises(pydantic.ValidationError):
- TelemetryEvent(
- name="test.event",
- time=datetime.now(timezone.utc),
- attributes="not_a_dict" # Should be dict
- )
-
- # Wrong metrics type
- with pytest.raises(pydantic.ValidationError):
- TelemetryEvent(
- name="test.event",
- time=datetime.now(timezone.utc),
- metrics="not_a_dict" # Should be dict
- )
-
- def test_telemetry_event_attributes_validation(self):
- """Test TelemetryEvent attributes validation."""
- event_time = datetime.now(timezone.utc)
-
- # Valid string attributes
- event = TelemetryEvent(
- name="test.event",
- time=event_time,
- attributes={"key1": "value1", "key2": "value2"}
- )
- assert event.attributes == {"key1": "value1", "key2": "value2"}
-
- # Invalid attributes (non-string values)
- with pytest.raises(pydantic.ValidationError):
- TelemetryEvent(
- name="test.event",
- time=event_time,
- attributes={"key1": "value1", "key2": 123} # 123 is not string
- )
-
- def test_telemetry_event_metrics_validation(self):
- """Test TelemetryEvent metrics validation."""
- event_time = datetime.now(timezone.utc)
-
- # Valid float metrics
- event = TelemetryEvent(
- name="test.event",
- time=event_time,
- metrics={"metric1": 123.45, "metric2": 67.89}
- )
- assert event.metrics == {"metric1": 123.45, "metric2": 67.89}
-
- # Invalid metrics (non-float values)
- with pytest.raises(pydantic.ValidationError):
- TelemetryEvent(
- name="test.event",
- time=event_time,
- metrics={"metric1": 123.45, "metric2": "not_a_float"}
- )
-
- def test_telemetry_event_serialization(self):
- """Test TelemetryEvent serialization."""
- event_time = datetime(2023, 12, 1, 12, 0, 0, tzinfo=timezone.utc)
- event = TelemetryEvent(
- name="api.request",
- time=event_time,
- attributes={"method": "POST", "endpoint": "/v1/listen"},
- metrics={"duration_ms": 250.0, "size_bytes": 1024.0}
- )
-
- try:
- data = event.model_dump()
- except AttributeError:
- data = event.dict()
-
- assert data["name"] == "api.request"
- assert data["attributes"]["method"] == "POST"
- assert data["metrics"]["duration_ms"] == 250.0
-
- def test_telemetry_event_deserialization(self):
- """Test TelemetryEvent deserialization."""
- data = {
- "name": "websocket.error",
- "time": "2023-12-01T12:00:00Z",
- "attributes": {"url": "wss://api.deepgram.com", "error_type": "ConnectionError"},
- "metrics": {"reconnect_attempts": 3.0, "downtime_ms": 5000.0}
- }
-
- event = TelemetryEvent(**data)
-
- assert event.name == "websocket.error"
- assert event.attributes["url"] == "wss://api.deepgram.com"
- assert event.metrics["reconnect_attempts"] == 3.0
-
- def test_telemetry_event_immutability(self):
- """Test that TelemetryEvent is immutable."""
- event_time = datetime.now(timezone.utc)
- event = TelemetryEvent(
- name="test.event",
- time=event_time
- )
-
- # Should not be able to modify fields
- with pytest.raises((pydantic.ValidationError, AttributeError, TypeError)):
- event.name = "modified.event"
-
- def test_telemetry_event_extra_fields_allowed(self):
- """Test that TelemetryEvent allows extra fields."""
- event_time = datetime.now(timezone.utc)
-
- # This should not raise due to extra="allow"
- event = TelemetryEvent(
- name="test.event",
- time=event_time,
- custom_field="custom_value",
- another_field=123
- )
-
- assert event.name == "test.event"
- assert event.time == event_time
-
- def test_telemetry_event_unicode_values(self):
- """Test TelemetryEvent with Unicode values."""
- event_time = datetime.now(timezone.utc)
- event = TelemetryEvent(
- name="ζ΅θ―.δΊδ»Ά",
- time=event_time,
- attributes={"ζθΏ°": "ΡΠ΅ΡΡ", "emoji": "π"},
- metrics={"ΠΌΠ΅ΡΡΠΈΠΊΠ°": 123.45}
- )
-
- assert event.name == "ζ΅θ―.δΊδ»Ά"
- assert event.attributes["ζθΏ°"] == "ΡΠ΅ΡΡ"
- assert event.attributes["emoji"] == "π"
- assert event.metrics["ΠΌΠ΅ΡΡΠΈΠΊΠ°"] == 123.45
-
-
-class TestErrorEvent:
- """Test ErrorEvent model."""
-
- def test_error_event_creation_minimal(self):
- """Test creating minimal ErrorEvent."""
- event_time = datetime.now(timezone.utc)
- event = ErrorEvent(
- type="ConnectionError",
- message="Connection failed",
- severity=ErrorSeverity.ERROR,
- time=event_time
- )
-
- assert event.type == "ConnectionError"
- assert event.message == "Connection failed"
- assert event.severity == ErrorSeverity.ERROR
- assert event.time == event_time
- assert event.stack_trace is None
- assert event.handled is False # Default value
-
- def test_error_event_creation_full(self):
- """Test creating ErrorEvent with all fields."""
- event_time = datetime.now(timezone.utc)
- stack_trace = "Traceback (most recent call last):\n File ...\nConnectionError: Connection failed"
-
- event = ErrorEvent(
- type="ConnectionError",
- message="Network timeout occurred",
- severity=ErrorSeverity.CRITICAL,
- time=event_time,
- stack_trace=stack_trace,
- handled=False
- )
-
- assert event.type == "ConnectionError"
- assert event.message == "Network timeout occurred"
- assert event.severity == ErrorSeverity.CRITICAL
- assert event.time == event_time
- assert event.stack_trace == stack_trace
- assert event.handled is False
-
- def test_error_event_missing_required_fields(self):
- """Test ErrorEvent validation with missing required fields."""
- event_time = datetime.now(timezone.utc)
-
- # All fields are optional except time, so we test missing time
- # Missing time (required field)
- with pytest.raises(pydantic.ValidationError) as exc_info:
- ErrorEvent(
- type="ConnectionError",
- message="Connection failed",
- severity=ErrorSeverity.ERROR
- )
- errors = exc_info.value.errors()
- field_names = [error["loc"][0] for error in errors]
- assert "time" in field_names
-
- # Since most fields are optional, let's just test that we can create
- # a minimal valid ErrorEvent
- minimal_event = ErrorEvent(time=event_time)
- assert minimal_event.time == event_time
- assert minimal_event.type is None
- assert minimal_event.message is None
- assert minimal_event.severity == ErrorSeverity.UNSPECIFIED # Default value
-
- def test_error_event_wrong_types(self):
- """Test ErrorEvent validation with wrong types."""
- event_time = datetime.now(timezone.utc)
-
- # Wrong type field
- with pytest.raises(pydantic.ValidationError):
- ErrorEvent(
- type=123, # Should be string
- message="Connection failed",
- severity=ErrorSeverity.ERROR,
- time=event_time
- )
-
- # Since most fields are optional and have default values,
- # let's test that the model accepts valid values
- valid_event = ErrorEvent(
- type="ConnectionError",
- message="Connection failed",
- severity=ErrorSeverity.ERROR,
- time=event_time,
- handled=True
- )
-
- assert valid_event.type == "ConnectionError"
- assert valid_event.message == "Connection failed"
- assert valid_event.severity == ErrorSeverity.ERROR
- assert valid_event.handled is True
-
- def test_error_event_severity_enum_values(self):
- """Test ErrorEvent with different severity values."""
- event_time = datetime.now(timezone.utc)
-
- for severity in ErrorSeverity:
- event = ErrorEvent(
- type="TestError",
- message="Test message",
- severity=severity,
- time=event_time
- )
- assert event.severity == severity
-
- def test_error_event_serialization(self):
- """Test ErrorEvent serialization."""
- event_time = datetime(2023, 12, 1, 12, 0, 0, tzinfo=timezone.utc)
- event = ErrorEvent(
- type="ValidationError",
- message="Invalid input data",
- severity=ErrorSeverity.WARNING,
- time=event_time,
- stack_trace="Stack trace here",
- handled=True
- )
-
- try:
- data = event.model_dump()
- except AttributeError:
- data = event.dict()
-
- assert data["type"] == "ValidationError"
- assert data["message"] == "Invalid input data"
- assert data["severity"] == "ERROR_SEVERITY_WARNING"
- assert data["stack_trace"] == "Stack trace here"
- assert data["handled"] is True
-
- def test_error_event_deserialization(self):
- """Test ErrorEvent deserialization."""
- data = {
- "type": "TimeoutError",
- "message": "Request timed out",
- "severity": "ERROR_SEVERITY_ERROR",
- "time": "2023-12-01T12:00:00Z",
- "stack_trace": "Traceback...",
- "handled": False
- }
-
- event = ErrorEvent(**data)
-
- assert event.type == "TimeoutError"
- assert event.message == "Request timed out"
- assert event.severity == ErrorSeverity.ERROR
- assert event.stack_trace == "Traceback..."
- assert event.handled is False
-
- def test_error_event_immutability(self):
- """Test that ErrorEvent is immutable."""
- event_time = datetime.now(timezone.utc)
- event = ErrorEvent(
- type="TestError",
- message="Test message",
- severity=ErrorSeverity.ERROR,
- time=event_time
- )
-
- # Should not be able to modify fields
- with pytest.raises((pydantic.ValidationError, AttributeError, TypeError)):
- event.type = "ModifiedError"
-
- def test_error_event_unicode_values(self):
- """Test ErrorEvent with Unicode values."""
- event_time = datetime.now(timezone.utc)
- event = ErrorEvent(
- type="Π£Π½ΠΈΠΊΠΎΠ΄ΠΡΠΈΠ±ΠΊΠ°",
- message="ζ΅θ―ιθ――ζΆζ― π¨",
- severity=ErrorSeverity.CRITICAL,
- time=event_time,
- stack_trace="Stack trace with ΡΠ΅ΡΡ unicode"
- )
-
- assert event.type == "Π£Π½ΠΈΠΊΠΎΠ΄ΠΡΠΈΠ±ΠΊΠ°"
- assert event.message == "ζ΅θ―ιθ――ζΆζ― π¨"
- assert "ΡΠ΅ΡΡ" in event.stack_trace
-
- def test_error_event_large_stack_trace(self):
- """Test ErrorEvent with large stack trace."""
- event_time = datetime.now(timezone.utc)
- large_stack_trace = "Traceback (most recent call last):\n" + " Line of stack trace\n" * 1000
-
- event = ErrorEvent(
- type="LargeStackError",
- message="Error with large stack trace",
- severity=ErrorSeverity.ERROR,
- time=event_time,
- stack_trace=large_stack_trace
- )
-
- assert event.type == "LargeStackError"
- assert len(event.stack_trace) > 10000
- assert event.stack_trace.startswith("Traceback")
-
-
-class TestTelemetryModelIntegration:
- """Test integration scenarios with telemetry models."""
-
- def test_complete_telemetry_scenario(self):
- """Test a complete telemetry scenario with all models."""
- # Create context
- context = TelemetryContext(
- package_name="python-sdk",
- package_version="3.2.1",
- language="python",
- runtime_version="python 3.11.6",
- os="darwin",
- arch="arm64",
- environment="production"
- )
-
- # Create telemetry event
- event_time = datetime.now(timezone.utc)
- telemetry_event = TelemetryEvent(
- name="http.request.completed",
- time=event_time,
- attributes={"method": "POST", "endpoint": "/v1/listen", "status": "success"},
- metrics={"duration_ms": 245.5, "payload_size": 1024.0, "response_size": 2048.0}
- )
-
- # Create error event
- error_event = ErrorEvent(
- type="ConnectionError",
- message="Network timeout during request",
- severity=ErrorSeverity.WARNING,
- time=event_time,
- handled=True
- )
-
- # Verify all models are properly created
- assert context.package_name == "python-sdk"
- assert telemetry_event.name == "http.request.completed"
- assert error_event.type == "ConnectionError"
- assert error_event.severity == ErrorSeverity.WARNING
-
- def test_model_serialization_consistency(self):
- """Test that all models serialize consistently."""
- event_time = datetime(2023, 12, 1, 12, 0, 0, tzinfo=timezone.utc)
-
- context = TelemetryContext(package_name="test-sdk", package_version="1.0.0")
- telemetry_event = TelemetryEvent(name="test.event", time=event_time)
- error_event = ErrorEvent(
- type="TestError",
- message="Test message",
- severity=ErrorSeverity.INFO,
- time=event_time
- )
-
- # All models should serialize without errors
- try:
- context_data = context.model_dump()
- telemetry_data = telemetry_event.model_dump()
- error_data = error_event.model_dump()
- except AttributeError:
- context_data = context.dict()
- telemetry_data = telemetry_event.dict()
- error_data = error_event.dict()
-
- # Verify basic structure
- assert isinstance(context_data, dict)
- assert isinstance(telemetry_data, dict)
- assert isinstance(error_data, dict)
-
- assert "package_name" in context_data
- assert "name" in telemetry_data
- assert "type" in error_data
-
- def test_model_validation_edge_cases(self):
- """Test model validation with edge cases."""
- event_time = datetime.now(timezone.utc)
-
- # Empty string values
- context = TelemetryContext(package_name="", package_version="")
- assert context.package_name == ""
- assert context.package_version == ""
-
- # Empty attributes and metrics
- telemetry_event = TelemetryEvent(
- name="test.event",
- time=event_time,
- attributes={},
- metrics={}
- )
- assert telemetry_event.attributes == {}
- assert telemetry_event.metrics == {}
-
- # Empty stack trace
- error_event = ErrorEvent(
- type="TestError",
- message="",
- severity=ErrorSeverity.UNSPECIFIED,
- time=event_time,
- stack_trace=""
- )
- assert error_event.message == ""
- assert error_event.stack_trace == ""
diff --git a/tests/unit/test_type_definitions.py b/tests/unit/test_type_definitions.py
deleted file mode 100644
index cad86e95..00000000
--- a/tests/unit/test_type_definitions.py
+++ /dev/null
@@ -1,431 +0,0 @@
-"""
-Unit tests for auto-generated type definitions.
-
-This module tests the various auto-generated type definitions including:
-- Simple type aliases
-- Union types
-- Pydantic models
-- Optional/Any types
-"""
-
-import typing
-import pytest
-import pydantic
-from unittest.mock import Mock
-
-# Import the types we want to test
-from deepgram.types.error_response import ErrorResponse
-from deepgram.types.error_response_text_error import ErrorResponseTextError
-from deepgram.types.error_response_legacy_error import ErrorResponseLegacyError
-from deepgram.types.error_response_modern_error import ErrorResponseModernError
-from deepgram.types.listen_v1model import ListenV1Model
-from deepgram.types.listen_v1callback import ListenV1Callback
-from deepgram.types.listen_v1tag import ListenV1Tag
-from deepgram.types.listen_v1response import ListenV1Response
-from deepgram.types.listen_v1response_metadata import ListenV1ResponseMetadata
-from deepgram.types.listen_v1response_results import ListenV1ResponseResults
-
-
-class TestSimpleTypeAliases:
- """Test simple type aliases like str, Optional[Any], etc."""
-
- def test_error_response_text_error_is_str(self):
- """Test that ErrorResponseTextError is a str type alias."""
- assert ErrorResponseTextError == str
-
- def test_error_response_text_error_usage(self):
- """Test that ErrorResponseTextError can be used as a string."""
- error_message: ErrorResponseTextError = "Authentication failed"
- assert isinstance(error_message, str)
- assert error_message == "Authentication failed"
-
- def test_listen_v1callback_is_optional_any(self):
- """Test that ListenV1Callback is Optional[Any]."""
- assert ListenV1Callback == typing.Optional[typing.Any]
-
- def test_listen_v1callback_usage(self):
- """Test that ListenV1Callback can accept None or any value."""
- callback1: ListenV1Callback = None
- callback2: ListenV1Callback = "http://example.com/webhook"
- callback3: ListenV1Callback = {"url": "http://example.com", "method": "POST"}
-
- assert callback1 is None
- assert isinstance(callback2, str)
- assert isinstance(callback3, dict)
-
- def test_listen_v1tag_is_optional_any(self):
- """Test that ListenV1Tag is Optional[Any]."""
- assert ListenV1Tag == typing.Optional[typing.Any]
-
- def test_listen_v1tag_usage(self):
- """Test that ListenV1Tag can accept None or any value."""
- tag1: ListenV1Tag = None
- tag2: ListenV1Tag = "my-tag"
- tag3: ListenV1Tag = ["tag1", "tag2"]
-
- assert tag1 is None
- assert isinstance(tag2, str)
- assert isinstance(tag3, list)
-
-
-class TestUnionTypes:
- """Test union types like ErrorResponse and ListenV1Model."""
-
- def test_error_response_union_structure(self):
- """Test that ErrorResponse is a union of the three error types."""
- assert ErrorResponse == typing.Union[ErrorResponseTextError, ErrorResponseLegacyError, ErrorResponseModernError]
-
- def test_error_response_accepts_string(self):
- """Test that ErrorResponse can accept a string (ErrorResponseTextError)."""
- error: ErrorResponse = "Simple error message"
- assert isinstance(error, str)
-
- def test_error_response_accepts_legacy_error(self):
- """Test that ErrorResponse can accept ErrorResponseLegacyError."""
- legacy_error = ErrorResponseLegacyError(
- err_code="AUTH_001",
- err_msg="Invalid API key",
- request_id="req_123"
- )
- error: ErrorResponse = legacy_error
- assert isinstance(error, ErrorResponseLegacyError)
-
- def test_error_response_accepts_modern_error(self):
- """Test that ErrorResponse can accept ErrorResponseModernError."""
- modern_error = ErrorResponseModernError(
- category="authentication",
- message="Invalid API key provided",
- details="The API key is missing or malformed",
- request_id="req_456"
- )
- error: ErrorResponse = modern_error
- assert isinstance(error, ErrorResponseModernError)
-
- def test_listen_v1model_union_structure(self):
- """Test that ListenV1Model is a union of literal strings and Any."""
- # Check that it's a union type
- origin = typing.get_origin(ListenV1Model)
- assert origin is typing.Union
-
- # Check that it includes typing.Any as one of the union members
- args = typing.get_args(ListenV1Model)
- assert typing.Any in args
-
- def test_listen_v1model_accepts_literal_values(self):
- """Test that ListenV1Model accepts predefined literal values."""
- valid_models = [
- "nova-3", "nova-2", "nova", "enhanced", "base",
- "meeting", "phonecall", "finance", "custom"
- ]
-
- for model in valid_models:
- model_value: ListenV1Model = model
- assert isinstance(model_value, str)
-
- def test_listen_v1model_accepts_any_value(self):
- """Test that ListenV1Model accepts any value due to typing.Any."""
- # String not in literals
- custom_model: ListenV1Model = "my-custom-model"
- assert isinstance(custom_model, str)
-
- # Non-string value
- numeric_model: ListenV1Model = 123
- assert isinstance(numeric_model, int)
-
- # Complex value
- dict_model: ListenV1Model = {"name": "custom", "version": "1.0"}
- assert isinstance(dict_model, dict)
-
-
-class TestPydanticModels:
- """Test Pydantic models like ErrorResponseLegacyError, ErrorResponseModernError, etc."""
-
- def test_error_response_legacy_error_creation(self):
- """Test creating ErrorResponseLegacyError with all fields."""
- error = ErrorResponseLegacyError(
- err_code="AUTH_001",
- err_msg="Invalid API key",
- request_id="req_123"
- )
-
- assert error.err_code == "AUTH_001"
- assert error.err_msg == "Invalid API key"
- assert error.request_id == "req_123"
-
- def test_error_response_legacy_error_optional_fields(self):
- """Test creating ErrorResponseLegacyError with optional fields."""
- error = ErrorResponseLegacyError()
-
- assert error.err_code is None
- assert error.err_msg is None
- assert error.request_id is None
-
- def test_error_response_legacy_error_partial_fields(self):
- """Test creating ErrorResponseLegacyError with some fields."""
- error = ErrorResponseLegacyError(err_code="ERR_001")
-
- assert error.err_code == "ERR_001"
- assert error.err_msg is None
- assert error.request_id is None
-
- def test_error_response_legacy_error_serialization(self):
- """Test serialization of ErrorResponseLegacyError."""
- error = ErrorResponseLegacyError(
- err_code="AUTH_001",
- err_msg="Invalid API key",
- request_id="req_123"
- )
-
- # Test serialization - use model_dump if available (Pydantic V2), otherwise dict
- try:
- serialized = error.model_dump()
- except AttributeError:
- serialized = error.dict()
-
- expected = {
- "err_code": "AUTH_001",
- "err_msg": "Invalid API key",
- "request_id": "req_123"
- }
- assert serialized == expected
-
- def test_error_response_legacy_error_immutability(self):
- """Test that ErrorResponseLegacyError is immutable (frozen)."""
- error = ErrorResponseLegacyError(err_code="TEST")
-
- with pytest.raises((AttributeError, pydantic.ValidationError)):
- error.err_code = "CHANGED"
-
- def test_error_response_modern_error_creation(self):
- """Test creating ErrorResponseModernError with all fields."""
- error = ErrorResponseModernError(
- category="authentication",
- message="Invalid API key provided",
- details="The API key is missing or malformed",
- request_id="req_456"
- )
-
- assert error.category == "authentication"
- assert error.message == "Invalid API key provided"
- assert error.details == "The API key is missing or malformed"
- assert error.request_id == "req_456"
-
- def test_error_response_modern_error_optional_fields(self):
- """Test creating ErrorResponseModernError with optional fields."""
- error = ErrorResponseModernError()
-
- assert error.category is None
- assert error.message is None
- assert error.details is None
- assert error.request_id is None
-
- def test_error_response_modern_error_serialization(self):
- """Test serialization of ErrorResponseModernError."""
- error = ErrorResponseModernError(
- category="validation",
- message="Invalid input",
- details="The request body contains invalid data"
- )
-
- # Test serialization - use model_dump if available (Pydantic V2), otherwise dict
- try:
- serialized = error.model_dump()
- except AttributeError:
- serialized = error.dict()
-
- expected = {
- "category": "validation",
- "message": "Invalid input",
- "details": "The request body contains invalid data",
- "request_id": None
- }
- assert serialized == expected
-
- def test_error_response_modern_error_immutability(self):
- """Test that ErrorResponseModernError is immutable (frozen)."""
- error = ErrorResponseModernError(category="test")
-
- with pytest.raises((AttributeError, pydantic.ValidationError)):
- error.category = "changed"
-
-
-class TestComplexPydanticModels:
- """Test complex Pydantic models with nested structures."""
-
- def test_listen_v1response_structure_validation(self):
- """Test that ListenV1Response validates required fields."""
- # Test that missing required fields raise validation errors
- with pytest.raises(pydantic.ValidationError) as exc_info:
- ListenV1Response()
-
- error = exc_info.value
- assert "metadata" in str(error)
- assert "results" in str(error)
-
- def test_listen_v1response_type_annotations(self):
- """Test that ListenV1Response has correct type annotations."""
- # Check that the model has the expected fields
- fields = ListenV1Response.model_fields if hasattr(ListenV1Response, 'model_fields') else ListenV1Response.__fields__
-
- assert "metadata" in fields
- assert "results" in fields
-
- # Check that these are the only required fields
- assert len(fields) == 2
-
-
-class TestTypeDefinitionEdgeCases:
- """Test edge cases and error conditions for type definitions."""
-
- def test_error_response_legacy_error_extra_fields_allowed(self):
- """Test that ErrorResponseLegacyError allows extra fields."""
- # This should not raise an error due to extra="allow"
- error = ErrorResponseLegacyError(
- err_code="TEST",
- extra_field="extra_value",
- another_field=123
- )
-
- assert error.err_code == "TEST"
- # Extra fields should be accessible
- assert hasattr(error, "extra_field")
- assert hasattr(error, "another_field")
-
- def test_error_response_modern_error_extra_fields_allowed(self):
- """Test that ErrorResponseModernError allows extra fields."""
- # This should not raise an error due to extra="allow"
- error = ErrorResponseModernError(
- category="test",
- custom_field="custom_value",
- numeric_field=456
- )
-
- assert error.category == "test"
- # Extra fields should be accessible
- assert hasattr(error, "custom_field")
- assert hasattr(error, "numeric_field")
-
- def test_listen_v1response_missing_required_fields(self):
- """Test that ListenV1Response raises error for missing required fields."""
- with pytest.raises(pydantic.ValidationError):
- ListenV1Response()
-
- with pytest.raises(pydantic.ValidationError):
- ListenV1Response(metadata=Mock())
-
- with pytest.raises(pydantic.ValidationError):
- ListenV1Response(results=Mock())
-
- def test_error_response_legacy_error_wrong_types(self):
- """Test that ErrorResponseLegacyError validates field types."""
- # Since all fields are Optional[str], non-string values should be handled
- # Pydantic might coerce or raise validation errors depending on the value
- try:
- error = ErrorResponseLegacyError(err_code=123) # int instead of str
- # If it doesn't raise, check if it was coerced to string
- assert isinstance(error.err_code, (str, int))
- except pydantic.ValidationError:
- # This is also acceptable behavior
- pass
-
- def test_error_response_modern_error_wrong_types(self):
- """Test that ErrorResponseModernError validates field types."""
- # Since all fields are Optional[str], non-string values should be handled
- try:
- error = ErrorResponseModernError(category=456) # int instead of str
- # If it doesn't raise, check if it was coerced to string
- assert isinstance(error.category, (str, int))
- except pydantic.ValidationError:
- # This is also acceptable behavior
- pass
-
-
-class TestTypeDefinitionIntegration:
- """Test integration scenarios with type definitions."""
-
- def test_error_response_union_type_checking(self):
- """Test that different error types can be used interchangeably."""
- errors: list[ErrorResponse] = [
- "Simple string error",
- ErrorResponseLegacyError(err_code="LEG_001", err_msg="Legacy error"),
- ErrorResponseModernError(category="modern", message="Modern error")
- ]
-
- assert len(errors) == 3
- assert isinstance(errors[0], str)
- assert isinstance(errors[1], ErrorResponseLegacyError)
- assert isinstance(errors[2], ErrorResponseModernError)
-
- def test_listen_v1model_in_function_signature(self):
- """Test using ListenV1Model in function signatures."""
- def process_model(model: ListenV1Model) -> str:
- return f"Processing model: {model}"
-
- # Test with literal values
- result1 = process_model("nova-3")
- assert result1 == "Processing model: nova-3"
-
- # Test with custom values (typing.Any allows this)
- result2 = process_model("custom-model")
- assert result2 == "Processing model: custom-model"
-
- # Test with non-string values
- result3 = process_model(123)
- assert result3 == "Processing model: 123"
-
- def test_type_definitions_serialization_consistency(self):
- """Test that type definitions serialize consistently."""
- legacy_error = ErrorResponseLegacyError(err_code="TEST", err_msg="Test message")
- modern_error = ErrorResponseModernError(category="test", message="Test message")
-
- # Both should be serializable
- try:
- legacy_dict = legacy_error.model_dump()
- except AttributeError:
- legacy_dict = legacy_error.dict()
-
- try:
- modern_dict = modern_error.model_dump()
- except AttributeError:
- modern_dict = modern_error.dict()
-
- assert isinstance(legacy_dict, dict)
- assert isinstance(modern_dict, dict)
- assert "err_code" in legacy_dict
- assert "category" in modern_dict
-
- def test_type_definitions_with_none_values(self):
- """Test type definitions with None values."""
- # Test that optional fields can be explicitly set to None
- legacy_error = ErrorResponseLegacyError(
- err_code=None,
- err_msg=None,
- request_id=None
- )
-
- modern_error = ErrorResponseModernError(
- category=None,
- message=None,
- details=None,
- request_id=None
- )
-
- assert legacy_error.err_code is None
- assert modern_error.category is None
-
- def test_type_definitions_with_unicode_values(self):
- """Test type definitions with Unicode values."""
- legacy_error = ErrorResponseLegacyError(
- err_code="ζ΅θ―_001",
- err_msg="Unicode error message: π¨",
- request_id="req_ζ΅θ―_123"
- )
-
- modern_error = ErrorResponseModernError(
- category="ΡΠ΅ΡΡ",
- message="Error with Γ©mojis: π₯",
- details="DΓ©tails de l'erreur"
- )
-
- assert legacy_error.err_code == "ζ΅θ―_001"
- assert modern_error.message == "Error with Γ©mojis: π₯"
diff --git a/websockets-reference.md b/websockets-reference.md
deleted file mode 100644
index 87634eca..00000000
--- a/websockets-reference.md
+++ /dev/null
@@ -1,1199 +0,0 @@
-# WebSocket Reference
-
-## Listen V1 Connect
-
-client.listen.v1.connect(...)
-
--
-
-#### π Description
-
-
--
-
-
--
-
-Transcribe audio and video using Deepgram's speech-to-text WebSocket
-
-
-
-
-
-
-#### π Usage
-
-
--
-
-
--
-
-```python
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-client = DeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-with client.listen.v1.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- connection.start_listening()
-
- # Send audio data
- from deepgram.extensions.types.sockets import ListenV1MediaMessage
- connection.send_media(ListenV1MediaMessage(audio_bytes))
-
- # Send control messages
- from deepgram.extensions.types.sockets import ListenV1ControlMessage
- connection.send_control(ListenV1ControlMessage(type="KeepAlive"))
-
-```
-
-
-
-
-
-
-#### π Async Usage
-
-
--
-
-
--
-
-```python
-import asyncio
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV1SocketClientResponse
-
-client = AsyncDeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-async def main():
- async with client.listen.v1.connect(model="nova-3") as connection:
- def on_message(message: ListenV1SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- await connection.start_listening()
-
- # Send audio data
- from deepgram.extensions.types.sockets import ListenV1MediaMessage
- await connection.send_media(ListenV1MediaMessage(audio_bytes))
-
- # Send control messages
- from deepgram.extensions.types.sockets import ListenV1ControlMessage
- await connection.send_control(ListenV1ControlMessage(type="KeepAlive"))
-
-asyncio.run(main())
-
-```
-
-
-
-
-
-
-#### π€ Send Methods
-
-
--
-
-
--
-
-**`send_media(message)`** β Send binary audio data for transcription
-
-- `ListenV1MediaMessage(audio_bytes)`
-
-
-
-
-
--
-
-**`send_control(message)`** β Send control messages to manage the connection
-
-- `ListenV1ControlMessage(type="KeepAlive")` β Keep the connection alive
-- `ListenV1ControlMessage(type="Finalize")` β Finalize the transcription
-
-
-
-
-
-
-#### βοΈ Parameters
-
-
--
-
-
--
-
-**model:** `str` β AI model to use for the transcription
-
-
-
-
-
--
-
-**callback:** `typing.Optional[str]` β URL to which we'll make the callback request
-
-
-
-
-
--
-
-**callback_method:** `typing.Optional[str]` β HTTP method by which the callback request will be made
-
-
-
-
-
--
-
-**channels:** `typing.Optional[str]` β Number of independent audio channels contained in submitted audio
-
-
-
-
-
--
-
-**diarize:** `typing.Optional[str]` β Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0
-
-
-
-
-
--
-
-**dictation:** `typing.Optional[str]` β Dictation mode for controlling formatting with dictated speech
-
-
-
-
-
--
-
-**encoding:** `typing.Optional[str]` β Specify the expected encoding of your submitted audio
-
-
-
-
-
--
-
-**endpointing:** `typing.Optional[str]` β Control when speech recognition ends
-
-
-
-
-
--
-
-**extra:** `typing.Optional[str]` β Arbitrary key-value pairs that are attached to the API response
-
-
-
-
-
--
-
-**filler_words:** `typing.Optional[str]` β Include filler words like "uh" and "um" in transcripts
-
-
-
-
-
--
-
-**interim_results:** `typing.Optional[str]` β Return partial transcripts as audio is being processed
-
-
-
-
-
--
-
-**keyterm:** `typing.Optional[str]` β Key term prompting can boost or suppress specialized terminology and brands
-
-
-
-
-
--
-
-**keywords:** `typing.Optional[str]` β Keywords can boost or suppress specialized terminology and brands
-
-
-
-
-
--
-
-**language:** `typing.Optional[str]` β BCP-47 language tag that hints at the primary spoken language
-
-
-
-
-
--
-
-**mip_opt_out:** `typing.Optional[str]` β Opts out requests from the Deepgram Model Improvement Program
-
-
-
-
-
--
-
-**multichannel:** `typing.Optional[str]` β Transcribe each audio channel independently
-
-
-
-
-
--
-
-**numerals:** `typing.Optional[str]` β Convert numbers from written format to numerical format
-
-
-
-
-
--
-
-**profanity_filter:** `typing.Optional[str]` β Remove profanity from transcripts
-
-
-
-
-
--
-
-**punctuate:** `typing.Optional[str]` β Add punctuation and capitalization to the transcript
-
-
-
-
-
--
-
-**redact:** `typing.Optional[str]` β Redaction removes sensitive information from your transcripts
-
-
-
-
-
--
-
-**replace:** `typing.Optional[str]` β Search for terms or phrases in submitted audio and replaces them
-
-
-
-
-
--
-
-**sample_rate:** `typing.Optional[str]` β Sample rate of the submitted audio
-
-
-
-
-
--
-
-**search:** `typing.Optional[str]` β Search for terms or phrases in submitted audio
-
-
-
-
-
--
-
-**smart_format:** `typing.Optional[str]` β Apply formatting to transcript output for improved readability
-
-
-
-
-
--
-
-**tag:** `typing.Optional[str]` β Label your requests for the purpose of identification during usage reporting
-
-
-
-
-
--
-
-**utterance_end_ms:** `typing.Optional[str]` β Length of time in milliseconds of silence to wait for before finalizing speech
-
-
-
-
-
--
-
-**vad_events:** `typing.Optional[str]` β Return Voice Activity Detection events via the websocket
-
-
-
-
-
--
-
-**version:** `typing.Optional[str]` β Version of the model to use
-
-
-
-
-
--
-
-**authorization:** `typing.Optional[str]` β Use your API key for authentication, or alternatively generate a temporary token and pass it via the token query parameter.
-
-**Example:** `token %DEEPGRAM_API_KEY%` or `bearer %DEEPGRAM_TOKEN%`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` β Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-## Listen V2 Connect
-
-client.listen.v2.connect(...)
-
--
-
-#### π Description
-
-
--
-
-
--
-
-Real-time conversational speech recognition with contextual turn detection for natural voice conversations
-
-
-
-
-
-
-#### π Usage
-
-
--
-
-
--
-
-```python
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV2SocketClientResponse
-
-client = DeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-with client.listen.v2.connect(
- model="flux-general-en",
- encoding="linear16",
- sample_rate="16000"
-) as connection:
- def on_message(message: ListenV2SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- connection.start_listening()
-
- # Send audio data
- from deepgram.extensions.types.sockets import ListenV2MediaMessage
- connection.send_media(ListenV2MediaMessage(data=audio_bytes))
-
- # Send control messages
- from deepgram.extensions.types.sockets import ListenV2ControlMessage
- connection.send_control(ListenV2ControlMessage(type="CloseStream"))
-
-```
-
-
-
-
-
-
-#### π Async Usage
-
-
--
-
-
--
-
-```python
-import asyncio
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import ListenV2SocketClientResponse
-
-client = AsyncDeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-async def main():
- async with client.listen.v2.connect(
- model="flux-general-en",
- encoding="linear16",
- sample_rate="16000"
- ) as connection:
- def on_message(message: ListenV2SocketClientResponse) -> None:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- await connection.start_listening()
-
- # Send audio data
- from deepgram.extensions.types.sockets import ListenV2MediaMessage
- await connection.send_media(ListenV2MediaMessage(data=audio_bytes))
-
- # Send control messages
- from deepgram.extensions.types.sockets import ListenV2ControlMessage
- await connection.send_control(ListenV2ControlMessage(type="CloseStream"))
-
-asyncio.run(main())
-
-```
-
-
-
-
-
-
-#### π€ Send Methods
-
-
--
-
-
--
-
-**`send_media(message)`** β Send binary audio data for transcription
-
-- `ListenV2MediaMessage(data=audio_bytes)`
-
-
-
-
-
--
-
-**`send_control(message)`** β Send control messages to manage the connection
-
-- `ListenV2ControlMessage(type="CloseStream")` β Close the audio stream
-
-
-
-
-
-
-#### βοΈ Parameters
-
-
--
-
-
--
-
-**model:** `str` β AI model used to process submitted audio
-
-
-
-
-
--
-
-**encoding:** `str` β Specify the expected encoding of your submitted audio
-
-
-
-
-
--
-
-**sample_rate:** `str` β Sample rate of the submitted audio
-
-
-
-
-
--
-
-**eager_eot_threshold:** `typing.Optional[str]` β Threshold for eager end-of-turn detection
-
-
-
-
-
--
-
-**eot_threshold:** `typing.Optional[str]` β Threshold for end-of-turn detection
-
-
-
-
-
--
-
-**eot_timeout_ms:** `typing.Optional[str]` β Timeout in milliseconds for end-of-turn detection
-
-
-
-
-
--
-
-**keyterm:** `typing.Optional[str]` β Key term prompting can boost or suppress specialized terminology and brands
-
-
-
-
-
--
-
-**mip_opt_out:** `typing.Optional[str]` β Opts out requests from the Deepgram Model Improvement Program
-
-
-
-
-
--
-
-**tag:** `typing.Optional[str]` β Label your requests for the purpose of identification during usage reporting
-
-
-
-
-
--
-
-**authorization:** `typing.Optional[str]` β Use your API key for authentication, or alternatively generate a temporary token and pass it via the token query parameter.
-
-**Example:** `token %DEEPGRAM_API_KEY%` or `bearer %DEEPGRAM_TOKEN%`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` β Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-## Speak V1 Connect
-
-client.speak.v1.connect(...)
-
--
-
-#### π Description
-
-
--
-
-
--
-
-Convert text into natural-sounding speech using Deepgram's TTS WebSocket
-
-
-
-
-
-
-#### π Usage
-
-
--
-
-
--
-
-```python
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-client = DeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-with client.speak.v1.connect(
- model="aura-2-asteria-en",
- encoding="linear16",
- sample_rate=24000
-) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- connection.start_listening()
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1TextMessage
- connection.send_text(SpeakV1TextMessage(text="Hello, world!"))
-
- # Send control messages
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- connection.send_control(SpeakV1ControlMessage(type="Flush"))
- connection.send_control(SpeakV1ControlMessage(type="Close"))
-
-```
-
-
-
-
-
-
-#### π Async Usage
-
-
--
-
-
--
-
-```python
-import asyncio
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import SpeakV1SocketClientResponse
-
-client = AsyncDeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-async def main():
- async with client.speak.v1.connect(
- model="aura-2-asteria-en",
- encoding="linear16",
- sample_rate=24000
- ) as connection:
- def on_message(message: SpeakV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- connection.on(EventType.OPEN, lambda _: print("Connection opened"))
- connection.on(EventType.MESSAGE, on_message)
- connection.on(EventType.CLOSE, lambda _: print("Connection closed"))
- connection.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- await connection.start_listening()
-
- # Send text to be converted to speech
- from deepgram.extensions.types.sockets import SpeakV1TextMessage
- await connection.send_text(SpeakV1TextMessage(text="Hello, world!"))
-
- # Send control messages
- from deepgram.extensions.types.sockets import SpeakV1ControlMessage
- await connection.send_control(SpeakV1ControlMessage(type="Flush"))
- await connection.send_control(SpeakV1ControlMessage(type="Close"))
-
-asyncio.run(main())
-
-```
-
-
-
-
-
-
-#### π€ Send Methods
-
-
--
-
-
--
-
-**`send_text(message)`** β Send text to be converted to speech
-
-- `SpeakV1TextMessage(text="Hello, world!")`
-
-
-
-
-
--
-
-**`send_control(message)`** β Send control messages to manage speech synthesis
-
-- `SpeakV1ControlMessage(type="Flush")` β Process all queued text immediately
-- `SpeakV1ControlMessage(type="Clear")` β Clear the text queue
-- `SpeakV1ControlMessage(type="Close")` β Close the connection
-
-
-
-
-
-
-#### βοΈ Parameters
-
-
--
-
-
--
-
-**encoding:** `typing.Optional[str]` β Specify the expected encoding of your output audio
-
-
-
-
-
--
-
-**mip_opt_out:** `typing.Optional[str]` β Opts out requests from the Deepgram Model Improvement Program
-
-
-
-
-
--
-
-**model:** `typing.Optional[str]` β AI model used to process submitted text
-
-
-
-
-
--
-
-**sample_rate:** `typing.Optional[str]` β Sample rate for the output audio
-
-
-
-
-
--
-
-**authorization:** `typing.Optional[str]` β Use your API key for authentication, or alternatively generate a temporary token and pass it via the token query parameter.
-
-**Example:** `token %DEEPGRAM_API_KEY%` or `bearer %DEEPGRAM_TOKEN%`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` β Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-## Agent V1 Connect
-
-client.agent.v1.connect(...)
-
--
-
-#### π Description
-
-
--
-
-
--
-
-Build a conversational voice agent using Deepgram's Voice Agent WebSocket
-
-
-
-
-
-
-#### π Usage
-
-
--
-
-
--
-
-```python
-from deepgram import DeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import (
- AgentV1Agent,
- AgentV1AudioConfig,
- AgentV1AudioInput,
- AgentV1DeepgramSpeakProvider,
- AgentV1Listen,
- AgentV1ListenProvider,
- AgentV1OpenAiThinkProvider,
- AgentV1SettingsMessage,
- AgentV1SocketClientResponse,
- AgentV1SpeakProviderConfig,
- AgentV1Think,
-)
-
-client = DeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-with client.agent.v1.connect() as agent:
- # Configure the agent
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(
- encoding="linear16",
- sample_rate=44100,
- )
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(
- type="deepgram",
- model="nova-3",
- smart_format=True,
- )
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai",
- model="gpt-4o-mini",
- temperature=0.7,
- ),
- prompt='Reply only and explicitly with "OK".',
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram",
- model="aura-2-asteria-en",
- )
- ),
- ),
- )
-
- agent.send_settings(settings)
-
- def on_message(message: AgentV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- agent.on(EventType.OPEN, lambda _: print("Connection opened"))
- agent.on(EventType.MESSAGE, on_message)
- agent.on(EventType.CLOSE, lambda _: print("Connection closed"))
- agent.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- agent.start_listening()
-
- # Send audio data
- from deepgram.extensions.types.sockets import AgentV1MediaMessage
- agent.send_media(AgentV1MediaMessage(data=audio_bytes))
-
- # Send control messages
- from deepgram.extensions.types.sockets import AgentV1ControlMessage
- agent.send_control(AgentV1ControlMessage(type="KeepAlive"))
-
-```
-
-
-
-
-
-
-#### π Async Usage
-
-
--
-
-
--
-
-```python
-import asyncio
-from deepgram import AsyncDeepgramClient
-from deepgram.core.events import EventType
-from deepgram.extensions.types.sockets import (
- AgentV1Agent,
- AgentV1AudioConfig,
- AgentV1AudioInput,
- AgentV1DeepgramSpeakProvider,
- AgentV1Listen,
- AgentV1ListenProvider,
- AgentV1OpenAiThinkProvider,
- AgentV1SettingsMessage,
- AgentV1SocketClientResponse,
- AgentV1SpeakProviderConfig,
- AgentV1Think,
-)
-
-client = AsyncDeepgramClient(
- api_key="YOUR_API_KEY",
-)
-
-async def main():
- async with client.agent.v1.connect() as agent:
- # Configure the agent
- settings = AgentV1SettingsMessage(
- audio=AgentV1AudioConfig(
- input=AgentV1AudioInput(
- encoding="linear16",
- sample_rate=16000,
- )
- ),
- agent=AgentV1Agent(
- listen=AgentV1Listen(
- provider=AgentV1ListenProvider(
- type="deepgram",
- model="nova-3",
- smart_format=True,
- )
- ),
- think=AgentV1Think(
- provider=AgentV1OpenAiThinkProvider(
- type="open_ai",
- model="gpt-4o-mini",
- temperature=0.7,
- )
- ),
- speak=AgentV1SpeakProviderConfig(
- provider=AgentV1DeepgramSpeakProvider(
- type="deepgram",
- model="aura-2-asteria-en",
- )
- ),
- ),
- )
-
- await agent.send_settings(settings)
-
- def on_message(message: AgentV1SocketClientResponse) -> None:
- if isinstance(message, bytes):
- print("Received audio event")
- else:
- msg_type = getattr(message, "type", "Unknown")
- print(f"Received {msg_type} event")
-
- agent.on(EventType.OPEN, lambda _: print("Connection opened"))
- agent.on(EventType.MESSAGE, on_message)
- agent.on(EventType.CLOSE, lambda _: print("Connection closed"))
- agent.on(EventType.ERROR, lambda error: print(f"Caught: {error}"))
-
- # Start listening
- await agent.start_listening()
-
- # Send audio data
- from deepgram.extensions.types.sockets import AgentV1MediaMessage
- await agent.send_media(AgentV1MediaMessage(data=audio_bytes))
-
- # Send control messages
- from deepgram.extensions.types.sockets import AgentV1ControlMessage
- await agent.send_control(AgentV1ControlMessage(type="KeepAlive"))
-
-asyncio.run(main())
-
-```
-
-
-
-
-
-
-#### βοΈ Parameters
-
-
--
-
-
--
-
-**authorization:** `typing.Optional[str]` β Use your API key for authentication, or alternatively generate a temporary token and pass it via the token query parameter.
-
-**Example:** `token %DEEPGRAM_API_KEY%` or `bearer %DEEPGRAM_TOKEN%`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` β Request-specific configuration.
-
-
-
-
-
-
-#### π€ Send Methods
-
-
--
-
-
--
-
-**`send_settings(message)`** β Send initial agent configuration settings
-
-- `AgentV1SettingsMessage(...)` β Configure audio, listen, think, and speak providers
-
-
-
-
-
--
-
-**`send_media(message)`** β Send binary audio data to the agent
-
-- `AgentV1MediaMessage(data=audio_bytes)`
-
-
-
-
-
--
-
-**`send_control(message)`** β Send control messages (keep_alive, etc.)
-
-- `AgentV1ControlMessage(type="KeepAlive")`
-
-
-
-
-
--
-
-**`send_update_speak(message)`** β Update the agent's speech synthesis settings
-
-- `AgentV1UpdateSpeakMessage(...)` β Modify TTS configuration during conversation
-
-
-
-
-
--
-
-**`send_update_prompt(message)`** β Update the agent's system prompt
-
-- `AgentV1UpdatePromptMessage(...)` β Change the agent's behavior instructions
-
-
-
-
-
--
-
-**`send_inject_user_message(message)`** β Inject a user message into the conversation
-
-- `AgentV1InjectUserMessageMessage(...)` β Add a simulated user input
-
-
-
-
-
--
-
-**`send_inject_agent_message(message)`** β Inject an agent message into the conversation
-
-- `AgentV1InjectAgentMessageMessage(...)` β Add a simulated agent response
-
-
-
-
-
--
-
-**`send_function_call_response(message)`** β Send the result of a function call back to the agent
-
-- `AgentV1FunctionCallResponseMessage(...)` β Provide function execution results
-
-
-
-
-
-
-
-
-