diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000..763462f
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,17 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/typescript-node
+{
+ "name": "Development",
+ "image": "mcr.microsoft.com/devcontainers/typescript-node:latest",
+ "features": {
+ "ghcr.io/devcontainers/features/node:1": {}
+ },
+ "postCreateCommand": "yarn install",
+ "customizations": {
+ "vscode": {
+ "extensions": [
+ "esbenp.prettier-vscode"
+ ]
+ }
+ }
+}
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 139559e..2704edb 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,4 +2,4 @@
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
-* @ashwinb @yanxi0830 @hardikjshah @dltn @raghotham @ehhuang @reluctantfuturist
+* @ashwinb @raghotham @ehhuang @reluctantfuturist
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..46e7d1c
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,89 @@
+name: CI
+on:
+ push:
+ branches-ignore:
+ - 'generated'
+ - 'codegen/**'
+ - 'integrated/**'
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
+ pull_request:
+ branches-ignore:
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
+
+jobs:
+ lint:
+ timeout-minutes: 10
+ name: lint
+ runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Bootstrap
+ run: ./scripts/bootstrap
+
+ - name: Check types
+ run: ./scripts/lint
+
+ build:
+ timeout-minutes: 5
+ name: build
+ runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ permissions:
+ contents: read
+ id-token: write
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Bootstrap
+ run: ./scripts/bootstrap
+
+ - name: Check build
+ run: ./scripts/build
+
+ - name: Get GitHub OIDC Token
+ if: github.repository == 'stainless-sdks/llama-stack-client-node'
+ id: github-oidc
+ uses: actions/github-script@v6
+ with:
+ script: core.setOutput('github_token', await core.getIDToken());
+
+ - name: Upload tarball
+ if: github.repository == 'stainless-sdks/llama-stack-client-node'
+ env:
+ URL: https://pkg.stainless.com/s
+ AUTH: ${{ steps.github-oidc.outputs.github_token }}
+ SHA: ${{ github.sha }}
+ run: ./scripts/utils/upload-artifact.sh
+
+ test:
+ timeout-minutes: 10
+ name: test
+ runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: '20'
+
+ - name: Bootstrap
+ run: ./scripts/bootstrap
+
+ - name: Run tests
+ run: ./scripts/test
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
new file mode 100644
index 0000000..625635a
--- /dev/null
+++ b/.github/workflows/release-doctor.yml
@@ -0,0 +1,21 @@
+name: Release Doctor
+on:
+ pull_request:
+ branches:
+ - main
+ workflow_dispatch:
+
+jobs:
+ release_doctor:
+ name: release doctor
+ runs-on: ubuntu-latest
+ if: github.repository == 'llamastack/llama-stack-client-typescript' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Check release environment
+ run: |
+ bash ./bin/check-release-environment
+      # env: add release secrets (e.g. NPM_TOKEN) here once publishing is configured
+
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 0000000..ca1d94e
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "0.2.17"
+}
diff --git a/.stats.yml b/.stats.yml
index 9f04884..bd77bef 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,4 @@
-configured_endpoints: 74
-openapi_spec_url: https://github.com/meta-llama/llama-stack/blob/main/docs/resources/llama-stack-spec.yaml
+configured_endpoints: 106
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f59f1c7d33001d60b5190f68aa49eacec90f05dbe694620b8916152c3922051d.yml
+openapi_spec_hash: 804edd2e834493906dc430145402be3b
+config_hash: de16e52db65de71ac35adcdb665a74f5
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..f3992c6
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,104 @@
+# Changelog
+
+## 0.2.17 (2025-08-06)
+
+Full Changelog: [v0.2.15...v0.2.17](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.15...v0.2.17)
+
+### Features
+
+* **api:** update via SDK Studio ([9803419](https://github.com/llamastack/llama-stack-client-typescript/commit/98034195897ff31be9164761450bcab933e381cc))
+* **api:** update via SDK Studio ([57bb86c](https://github.com/llamastack/llama-stack-client-typescript/commit/57bb86c95fa5925661f243fb9c9e953ac451a392))
+* **api:** update via SDK Studio ([40c2189](https://github.com/llamastack/llama-stack-client-typescript/commit/40c218958db8991a7483ed9ace4242d171770d42))
+* **api:** update via SDK Studio ([26b572d](https://github.com/llamastack/llama-stack-client-typescript/commit/26b572d92a150ef1ee25ec6efd0e9bd38f321072))
+* **api:** update via SDK Studio ([8a48a6f](https://github.com/llamastack/llama-stack-client-typescript/commit/8a48a6fe63d13817953c2acb4fbf5b4ab6136f4a))
+* **api:** update via SDK Studio ([3ea8a73](https://github.com/llamastack/llama-stack-client-typescript/commit/3ea8a73c9d8e66bbc3650aa7e6a19a4ce07f30c5))
+* **api:** update via SDK Studio ([cddd18f](https://github.com/llamastack/llama-stack-client-typescript/commit/cddd18fb70e3830d7062d12aab4754c3e598bbd2))
+* **api:** update via SDK Studio ([fc4fbf9](https://github.com/llamastack/llama-stack-client-typescript/commit/fc4fbf94810db7f89288cc36780d2616c8fc715a))
+* **api:** update via SDK Studio ([2a981d4](https://github.com/llamastack/llama-stack-client-typescript/commit/2a981d45f801bdf82e43c6d7d7c6674cc03cadc3))
+* **api:** update via SDK Studio ([14544ce](https://github.com/llamastack/llama-stack-client-typescript/commit/14544ce36d7a33509af85783a421d9c1995e22d1))
+* **api:** update via SDK Studio ([57c0764](https://github.com/llamastack/llama-stack-client-typescript/commit/57c07641906fc04eb9eadfd12f672e28a3a2efbc))
+* **api:** update via SDK Studio ([426728c](https://github.com/llamastack/llama-stack-client-typescript/commit/426728c7f86ce3385eb8c116f41a5b192abd5d0c))
+
+
+### Bug Fixes
+
+* **ci:** update version, skip a failing test ([#4](https://github.com/llamastack/llama-stack-client-typescript/issues/4)) ([7a5dbe7](https://github.com/llamastack/llama-stack-client-typescript/commit/7a5dbe7ed59b24feda5d73df8808fde2d337fc2a))
+* **client:** don't send `Content-Type` for bodyless methods ([6806e8e](https://github.com/llamastack/llama-stack-client-typescript/commit/6806e8ef31302a0f2ca0ab9ae36e4781e5f0adf7))
+
+
+### Chores
+
+* **internal:** codegen related update ([267a378](https://github.com/llamastack/llama-stack-client-typescript/commit/267a378b1999abd5f17f08b5792ee99d9c405439))
+* **internal:** codegen related update ([79e7896](https://github.com/llamastack/llama-stack-client-typescript/commit/79e78969a31df16ef35901c3ce4c003f70d59778))
+* **internal:** remove redundant imports config ([f19eb25](https://github.com/llamastack/llama-stack-client-typescript/commit/f19eb258d836c7de4fb719c62dabcbfb502ecc6c))
+* make some internal functions async ([e2797ae](https://github.com/llamastack/llama-stack-client-typescript/commit/e2797ae1e88960ffa5b13a89103d4ee9972803f9))
+* **mcp:** rework imports in tools ([9486e73](https://github.com/llamastack/llama-stack-client-typescript/commit/9486e7319d36cb8efe86568884057c65e91d84b2))
+* mention unit type in timeout docs ([c2b9867](https://github.com/llamastack/llama-stack-client-typescript/commit/c2b986793dd9f2fa55e8f4ce9c463a4d99635ab4))
+
+
+### Build System
+
+* Bump version to 0.2.14 ([4d2c696](https://github.com/llamastack/llama-stack-client-typescript/commit/4d2c696b916c9868be61fff31f008442cb346eca))
+* Bump version to 0.2.15 ([6645f62](https://github.com/llamastack/llama-stack-client-typescript/commit/6645f629844fd24b7e8b8fab9089cf1ba7cb9352))
+* Bump version to 0.2.16 ([fe9ab2e](https://github.com/llamastack/llama-stack-client-typescript/commit/fe9ab2e081df8f9f254b74e3bc42ac880dda765f))
+* Bump version to 0.2.17 ([7da4cca](https://github.com/llamastack/llama-stack-client-typescript/commit/7da4cca39c982d6f3f07fa09a9428983d233bc5e))
+
+## 0.1.0-alpha.3 (2025-06-28)
+
+Full Changelog: [v0.1.0-alpha.2...v0.1.0-alpha.3](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.1.0-alpha.2...v0.1.0-alpha.3)
+
+### Chores
+
+* **ci:** only run for pushes and fork pull requests ([70cf3b4](https://github.com/llamastack/llama-stack-client-typescript/commit/70cf3b4cfe81f5d4757f05ea0372342c9c8ce08b))
+
+## 0.1.0-alpha.2 (2025-06-27)
+
+Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.1.0-alpha.1...v0.1.0-alpha.2)
+
+### Features
+
+* **api:** update via SDK Studio ([a00f961](https://github.com/llamastack/llama-stack-client-typescript/commit/a00f961a3a4a8961cd54ad6a92a52aa34cb0d041))
+* **api:** update via SDK Studio ([bef1e47](https://github.com/llamastack/llama-stack-client-typescript/commit/bef1e47ad9fe9a03e8ffdaa632981c0666919b73))
+* **api:** update via SDK Studio ([7fb44fa](https://github.com/llamastack/llama-stack-client-typescript/commit/7fb44fab41cd95410115d12a7855fd12fbd3b34c))
+
+## 0.1.0-alpha.1 (2025-06-27)
+
+Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.0.1-alpha.0...v0.1.0-alpha.1)
+
+### Features
+
+* **client:** add support for endpoint-specific base URLs ([4c942da](https://github.com/llamastack/llama-stack-client-typescript/commit/4c942da59c2e3d40b9dacd8198e52ee60b403849))
+
+
+### Bug Fixes
+
+* **client:** always overwrite when merging headers ([31ec06d](https://github.com/llamastack/llama-stack-client-typescript/commit/31ec06d09d5143cb2b545114a9436059e06e78d4))
+* **client:** explicitly copy fetch in withOptions ([aa0e2a6](https://github.com/llamastack/llama-stack-client-typescript/commit/aa0e2a685e75c31678dbef7be8381ce55ff01800))
+* **client:** get fetchOptions type more reliably ([5e30a99](https://github.com/llamastack/llama-stack-client-typescript/commit/5e30a9916c22bfb4d00bfaafa27449fb07fd8f68))
+* compat with more runtimes ([625a6db](https://github.com/llamastack/llama-stack-client-typescript/commit/625a6db4c7d07936c854cbddc17b859290f9f2c4))
+* publish script — handle NPM errors correctly ([39a151f](https://github.com/llamastack/llama-stack-client-typescript/commit/39a151fe741ebce64d96ee80c6abe954a4b7f92d))
+
+
+### Chores
+
+* adjust eslint.config.mjs ignore pattern ([f0198eb](https://github.com/llamastack/llama-stack-client-typescript/commit/f0198ebf4d831ecc7089b382e1ab8317d7caec34))
+* avoid type error in certain environments ([c120307](https://github.com/llamastack/llama-stack-client-typescript/commit/c12030797aeb66958347d1c29d47e6bde73c6d19))
+* change publish docs url ([8165807](https://github.com/llamastack/llama-stack-client-typescript/commit/8165807d5c54cd91549ec66e127e0c5afd2d595d))
+* **ci:** enable for pull requests ([85ff8d9](https://github.com/llamastack/llama-stack-client-typescript/commit/85ff8d9c3b928405c85f682b1c56c22340efabc8))
+* **client:** refactor imports ([b2ab744](https://github.com/llamastack/llama-stack-client-typescript/commit/b2ab74493d3d528f3db9bf84a7af3ffe291efa54))
+* **deps:** bump eslint-plugin-prettier ([1041139](https://github.com/llamastack/llama-stack-client-typescript/commit/104113998e2c3412112a49d75596c4496d58fd43))
+* **docs:** grammar improvements ([461216e](https://github.com/llamastack/llama-stack-client-typescript/commit/461216eaac75ed802adb8cda21d5f88498fbadcc))
+* **docs:** use top-level-await in example snippets ([74b5549](https://github.com/llamastack/llama-stack-client-typescript/commit/74b5549f48e82f05e5b507393026542d939a6b27))
+* improve publish-npm script --latest tag logic ([5dd9d90](https://github.com/llamastack/llama-stack-client-typescript/commit/5dd9d9031ded40d4d20ef3fb2aa101f743f7b593))
+* **internal:** add pure annotations, make base APIResource abstract ([c239e7d](https://github.com/llamastack/llama-stack-client-typescript/commit/c239e7dad3fa8254cb90ea78a93d8aad5e3b90be))
+* **internal:** fix readablestream types in node 20 ([287f657](https://github.com/llamastack/llama-stack-client-typescript/commit/287f657d36d0548502f12802b8ea17f627da1f20))
+* **internal:** update jest config ([a36fe70](https://github.com/llamastack/llama-stack-client-typescript/commit/a36fe70319c6a033a9deedee714102bee04c97e1))
+* **package:** remove engines ([6066770](https://github.com/llamastack/llama-stack-client-typescript/commit/6066770fb1c17521dcdc2237156ba88b42beed94))
+* **readme:** update badges ([5239745](https://github.com/llamastack/llama-stack-client-typescript/commit/5239745b18dded8a88500cac31138bd170470fc9))
+* **readme:** use better example snippet for undocumented params ([e035b8f](https://github.com/llamastack/llama-stack-client-typescript/commit/e035b8f9ac69949d6cc897be9f3bd221d8afed7e))
+* update SDK settings ([e7d2cfc](https://github.com/llamastack/llama-stack-client-typescript/commit/e7d2cfcc355eb5990ef5e750cb18ace391e75b5b))
+
+
+### Refactors
+
+* **types:** replace Record with mapped types ([ef71453](https://github.com/llamastack/llama-stack-client-typescript/commit/ef7145362e215ac5dffbeb59ca3fdc944edfe183))
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 15f9977..86fc0a6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -42,15 +42,15 @@ If you’d like to use the repository from source, you can either install from g
To install via git:
```sh
-$ npm install git+ssh://git@github.com:stainless-sdks/llama-stack-node.git
+$ npm install git+ssh://git@github.com:llamastack/llama-stack-client-typescript.git
```
Alternatively, to link a local copy of the repo:
```sh
# Clone
-$ git clone https://www.github.com/stainless-sdks/llama-stack-node
-$ cd llama-stack-node
+$ git clone https://www.github.com/llamastack/llama-stack-client-typescript
+$ cd llama-stack-client-typescript
# With yarn
$ yarn link
diff --git a/LICENSE b/LICENSE
index aab621b..1ace7ad 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,22 +1,7 @@
-MIT License
+Copyright 2025 llama-stack-client
-Copyright (c) Meta Platforms, Inc. and affiliates
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/README.md b/README.md
index 3d36090..a27b8c1 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,22 @@
-# Llama Stack Client TypeScript and JavaScript API Library
+# Llama Stack Client Node API Library
-[](https://npmjs.org/package/llama-stack-client)  [](https://discord.gg/llama-stack)
+[](https://npmjs.org/package/llama-stack-client) 
This library provides convenient access to the Llama Stack Client REST API from server-side TypeScript or JavaScript.
-The REST API documentation can be found on [https://llama-stack.readthedocs.io/en/latest/references/api_reference/index.html](https://llama-stack.readthedocs.io/en/latest/references/api_reference/index.html). The full API of this library can be found in [api.md](api.md).
+The REST API documentation can be found on [llama-stack.readthedocs.io](https://llama-stack.readthedocs.io/en/latest/). The full API of this library can be found in [api.md](api.md).
-It is generated with [Stainless](https://www.stainlessapi.com/).
+It is generated with [Stainless](https://www.stainless.com/).
## Installation
```sh
-npm install llama-stack-client
+npm install git+ssh://git@github.com:llamastack/llama-stack-client-typescript.git
```
+> [!NOTE]
+> Once this package is [published to npm](https://www.stainless.com/docs/guides/publish), this will become: `npm install llama-stack-client`
+
## Usage
The full API of this library can be found in [api.md](api.md).
@@ -22,17 +25,11 @@ The full API of this library can be found in [api.md](api.md).
```js
import LlamaStackClient from 'llama-stack-client';
-const client = new LlamaStackClient({
- baseURL: 'http://localhost:8321'
-});
-
-async function main() {
- const models = await client.models.list();
+const client = new LlamaStackClient();
- console.log(models);
-}
+const model = await client.models.register({ model_id: 'model_id' });
-main();
+console.log(model.identifier);
```
## Streaming responses
@@ -46,11 +43,11 @@ const client = new LlamaStackClient();
const stream = await client.inference.chatCompletion({
messages: [{ content: 'string', role: 'user' }],
- model_id: 'meta-llama/Llama-3.2-3B-Instruct',
+ model_id: 'model_id',
stream: true,
});
-for await (const inferenceChatCompletionResponse of stream) {
- process.stdout.write(inferenceChatCompletionResponse.event.delta.text || '');
+for await (const chatCompletionResponseStreamChunk of stream) {
+ console.log(chatCompletionResponseStreamChunk.completion_message);
}
```
@@ -67,21 +64,47 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-async function main() {
- const params: LlamaStackClient.InferenceChatCompletionParams = {
- messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
- };
- const response: LlamaStackClient.InferenceChatCompletionResponse = await client.inference.chatCompletion(
- params,
- );
-}
-
-main();
+const params: LlamaStackClient.InferenceChatCompletionParams = {
+ messages: [{ content: 'string', role: 'user' }],
+ model_id: 'model_id',
+};
+const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion(
+ params,
+);
```
Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors.
+## File uploads
+
+Request parameters that correspond to file uploads can be passed in many different forms:
+
+- `File` (or an object with the same structure)
+- a `fetch` `Response` (or an object with the same structure)
+- an `fs.ReadStream`
+- the return value of our `toFile` helper
+
+```ts
+import fs from 'fs';
+// Node 18+ provides a global `fetch`; no extra import is needed for it
+import LlamaStackClient, { toFile } from 'llama-stack-client';
+
+const client = new LlamaStackClient();
+
+// If you have access to Node `fs` we recommend using `fs.createReadStream()`:
+await client.files.create({ file: fs.createReadStream('/path/to/file'), purpose: 'assistants' });
+
+// Or if you have the web `File` API you can pass a `File` instance:
+await client.files.create({ file: new File(['my bytes'], 'file'), purpose: 'assistants' });
+
+// You can also pass a `fetch` `Response`:
+await client.files.create({ file: await fetch('https://somesite/file'), purpose: 'assistants' });
+
+// Finally, if none of the above are convenient, you can use our `toFile` helper:
+await client.files.create({ file: await toFile(Buffer.from('my bytes'), 'file'), purpose: 'assistants' });
+await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'file'), purpose: 'assistants' });
+```
+
## Handling errors
When the library is unable to connect to the API,
@@ -90,24 +113,20 @@ a subclass of `APIError` will be thrown:
```ts
-async function main() {
- const response = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
- .catch(async (err) => {
- if (err instanceof LlamaStackClient.APIError) {
- console.log(err.status); // 400
- console.log(err.name); // BadRequestError
- console.log(err.headers); // {server: 'nginx', ...}
- } else {
- throw err;
- }
- });
-}
-
-main();
+const chatCompletionResponse = await client.inference
+ .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+ .catch(async (err) => {
+ if (err instanceof LlamaStackClient.APIError) {
+ console.log(err.status); // 400
+ console.log(err.name); // BadRequestError
+ console.log(err.headers); // {server: 'nginx', ...}
+ } else {
+ throw err;
+ }
+ });
```
-Error codes are as followed:
+Error codes are as follows:
| Status Code | Error Type |
| ----------- | -------------------------- |
@@ -180,11 +199,11 @@ const response = await client.inference
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: response, response: raw } = await client.inference
+const { data: chatCompletionResponse, response: raw } = await client.inference
.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
-console.log(response);
+console.log(chatCompletionResponse.completion_message);
```
### Making custom/undocumented requests
@@ -247,7 +266,7 @@ import LlamaStackClient from 'llama-stack-client';
```
To do the inverse, add `import "llama-stack-client/shims/node"` (which does import polyfills).
-This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/stainless-sdks/llama-stack-node/tree/main/src/_shims#readme)).
+This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/llamastack/llama-stack-client-typescript/tree/main/src/_shims#readme)).
### Logging and middleware
@@ -306,7 +325,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
-We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/llama-stack-node/issues) with questions, bugs, or suggestions.
+We are keen for your feedback; please open an [issue](https://www.github.com/llamastack/llama-stack-client-typescript/issues) with questions, bugs, or suggestions.
## Requirements
diff --git a/SECURITY.md b/SECURITY.md
index 4ac2027..1b5f3a4 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -2,9 +2,9 @@
## Reporting Security Issues
-This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken.
+This SDK is generated by [Stainless Software Inc](https://www.stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken.
-To report a security issue, please contact the Stainless team at security@stainlessapi.com.
+To report a security issue, please contact the Stainless team at security@stainless.com.
## Responsible Disclosure
@@ -16,11 +16,11 @@ before making any information public.
## Reporting Non-SDK Related Security Issues
If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by Llama Stack Client please follow the respective company's security reporting guidelines.
+or products provided by Llama Stack Client, please follow the respective company's security reporting guidelines.
### Llama Stack Client Terms and Policies
-Please contact llamastack@meta.com for any questions or concerns regarding security of our services.
+Please contact llamastack@meta.com for any questions or concerns regarding the security of our services.
---
diff --git a/api.md b/api.md
index 051d80e..df16b33 100644
--- a/api.md
+++ b/api.md
@@ -4,6 +4,7 @@ Types:
- AgentConfig
- BatchCompletion
+- ChatCompletionResponse
- CompletionMessage
- ContentDelta
- Document
@@ -21,9 +22,9 @@ Types:
- ScoringResult
- SystemMessage
- ToolCall
+- ToolCallOrString
- ToolParamDefinition
- ToolResponseMessage
-- URL
- UserMessage
# Toolgroups
@@ -60,11 +61,12 @@ Types:
- ToolDef
- ToolInvocationResult
+- ToolRuntimeListToolsResponse
Methods:
- client.toolRuntime.invokeTool({ ...params }) -> ToolInvocationResult
-- client.toolRuntime.listTools({ ...params }) -> JSONLDecoder<ToolDef>
+- client.toolRuntime.listTools({ ...params }) -> ToolRuntimeListToolsResponse
## RagTool
@@ -73,6 +75,30 @@ Methods:
- client.toolRuntime.ragTool.insert({ ...params }) -> void
- client.toolRuntime.ragTool.query({ ...params }) -> QueryResult
+# Responses
+
+Types:
+
+- ResponseObject
+- ResponseObjectStream
+- ResponseListResponse
+
+Methods:
+
+- client.responses.create({ ...params }) -> ResponseObject
+- client.responses.retrieve(responseId) -> ResponseObject
+- client.responses.list({ ...params }) -> ResponseListResponse
+
+## InputItems
+
+Types:
+
+- InputItemListResponse
+
+Methods:
+
+- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
+
# Agents
Types:
@@ -83,10 +109,14 @@ Types:
- ToolExecutionStep
- ToolResponse
- AgentCreateResponse
+- AgentRetrieveResponse
+- AgentListResponse
Methods:
- client.agents.create({ ...params }) -> AgentCreateResponse
+- client.agents.retrieve(agentId) -> AgentRetrieveResponse
+- client.agents.list({ ...params }) -> AgentListResponse
- client.agents.delete(agentId) -> void
## Session
@@ -95,11 +125,13 @@ Types:
- Session
- SessionCreateResponse
+- SessionListResponse
Methods:
- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
- client.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
+- client.agents.session.list(agentId, { ...params }) -> SessionListResponse
- client.agents.session.delete(agentId, sessionId) -> void
## Steps
@@ -116,26 +148,16 @@ Methods:
Types:
+- AgentTurnResponseStreamChunk
- Turn
- TurnResponseEvent
- TurnResponseEventPayload
-- TurnCreateResponse
Methods:
-- client.agents.turn.create(agentId, sessionId, { ...params }) -> TurnCreateResponse
+- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
-
-# BatchInference
-
-Types:
-
-- BatchInferenceChatCompletionResponse
-
-Methods:
-
-- client.batchInference.chatCompletion({ ...params }) -> BatchInferenceChatCompletionResponse
-- client.batchInference.completion({ ...params }) -> BatchCompletion
+- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
# Datasets
@@ -144,39 +166,41 @@ Types:
- ListDatasetsResponse
- DatasetRetrieveResponse
- DatasetListResponse
+- DatasetIterrowsResponse
+- DatasetRegisterResponse
Methods:
-- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse | null
+- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
- client.datasets.list() -> DatasetListResponse
-- client.datasets.register({ ...params }) -> void
+- client.datasets.appendrows(datasetId, { ...params }) -> void
+- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
+- client.datasets.register({ ...params }) -> DatasetRegisterResponse
- client.datasets.unregister(datasetId) -> void
# Eval
Types:
+- BenchmarkConfig
- EvalCandidate
-- EvalTaskConfig
- EvaluateResponse
- Job
Methods:
-- client.eval.evaluateRows(taskId, { ...params }) -> EvaluateResponse
-- client.eval.runEval(taskId, { ...params }) -> Job
+- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
+- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
+- client.eval.runEval(benchmarkId, { ...params }) -> Job
+- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
## Jobs
-Types:
-
-- JobStatusResponse
-
Methods:
-- client.eval.jobs.retrieve(taskId, jobId) -> EvaluateResponse
-- client.eval.jobs.cancel(taskId, jobId) -> void
-- client.eval.jobs.status(taskId, jobId) -> JobStatusResponse | null
+- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
+- client.eval.jobs.cancel(benchmarkId, jobId) -> void
+- client.eval.jobs.status(benchmarkId, jobId) -> Job
# Inspect
@@ -196,18 +220,60 @@ Methods:
Types:
+- ChatCompletionResponseStreamChunk
- CompletionResponse
- EmbeddingsResponse
- TokenLogProbs
-- InferenceChatCompletionResponse
-- InferenceCompletionResponse
+- InferenceBatchChatCompletionResponse
Methods:
-- client.inference.chatCompletion({ ...params }) -> InferenceChatCompletionResponse
-- client.inference.completion({ ...params }) -> InferenceCompletionResponse
+- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse
+- client.inference.batchCompletion({ ...params }) -> BatchCompletion
+- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse
+- client.inference.completion({ ...params }) -> CompletionResponse
- client.inference.embeddings({ ...params }) -> EmbeddingsResponse
+# Embeddings
+
+Types:
+
+- CreateEmbeddingsResponse
+
+Methods:
+
+- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
+
+# Chat
+
+Types:
+
+- ChatCompletionChunk
+
+## Completions
+
+Types:
+
+- CompletionCreateResponse
+- CompletionRetrieveResponse
+- CompletionListResponse
+
+Methods:
+
+- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
+- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
+- client.chat.completions.list({ ...params }) -> CompletionListResponse
+
+# Completions
+
+Types:
+
+- CompletionCreateResponse
+
+Methods:
+
+- client.completions.create({ ...params }) -> CompletionCreateResponse
+
# VectorIo
Types:
@@ -230,11 +296,47 @@ Types:
Methods:
-- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse | null
+- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse
- client.vectorDBs.list() -> VectorDBListResponse
- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse
- client.vectorDBs.unregister(vectorDBId) -> void
+# VectorStores
+
+Types:
+
+- ListVectorStoresResponse
+- VectorStore
+- VectorStoreDeleteResponse
+- VectorStoreSearchResponse
+
+Methods:
+
+- client.vectorStores.create({ ...params }) -> VectorStore
+- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.vectorStores.list({ ...params }) -> ListVectorStoresResponse
+- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
+- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
+
+## Files
+
+Types:
+
+- VectorStoreFile
+- FileListResponse
+- FileDeleteResponse
+- FileContentResponse
+
+Methods:
+
+- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.list(vectorStoreId, { ...params }) -> FileListResponse
+- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
+- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+
# Models
Types:
@@ -245,7 +347,7 @@ Types:
Methods:
-- client.models.retrieve(modelId) -> Model | null
+- client.models.retrieve(modelId) -> Model
- client.models.list() -> ModelListResponse
- client.models.register({ ...params }) -> Model
- client.models.unregister(modelId) -> void
@@ -273,10 +375,10 @@ Types:
Methods:
-- client.postTraining.job.list() -> JobListResponse
-- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse | null
+- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data>
+- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
- client.postTraining.job.cancel({ ...params }) -> void
-- client.postTraining.job.status({ ...params }) -> JobStatusResponse | null
+- client.postTraining.job.status({ ...params }) -> JobStatusResponse
# Providers
@@ -287,7 +389,8 @@ Types:
Methods:
-- client.providers.list() -> ProviderListResponse
+- client.providers.retrieve(providerId) -> ProviderInfo
+- client.providers.list() -> ProviderListResponse
# Routes
@@ -300,6 +403,16 @@ Methods:
- client.routes.list() -> RouteListResponse
+# Moderations
+
+Types:
+
+- CreateResponse
+
+Methods:
+
+- client.moderations.create({ ...params }) -> CreateResponse
+
# Safety
Types:
@@ -320,7 +433,7 @@ Types:
Methods:
-- client.shields.retrieve(identifier) -> Shield | null
+- client.shields.retrieve(identifier) -> Shield
- client.shields.list() -> ShieldListResponse
- client.shields.register({ ...params }) -> Shield
@@ -351,24 +464,13 @@ Types:
Methods:
- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse
-- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
+- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
- client.telemetry.getTrace(traceId) -> Trace
- client.telemetry.logEvent({ ...params }) -> void
-- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
-- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
+- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
+- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
- client.telemetry.saveSpansToDataset({ ...params }) -> void
-# Datasetio
-
-Types:
-
-- PaginatedRowsResult
-
-Methods:
-
-- client.datasetio.appendRows({ ...params }) -> void
-- client.datasetio.getRowsPaginated({ ...params }) -> PaginatedRowsResult
-
# Scoring
Types:
@@ -392,20 +494,37 @@ Types:
Methods:
-- client.scoringFunctions.retrieve(scoringFnId) -> ScoringFn | null
+- client.scoringFunctions.retrieve(scoringFnId) -> ScoringFn
- client.scoringFunctions.list() -> ScoringFunctionListResponse
- client.scoringFunctions.register({ ...params }) -> void
-# EvalTasks
+# Benchmarks
+
+Types:
+
+- Benchmark
+- ListBenchmarksResponse
+- BenchmarkListResponse
+
+Methods:
+
+- client.benchmarks.retrieve(benchmarkId) -> Benchmark
+- client.benchmarks.list() -> BenchmarkListResponse
+- client.benchmarks.register({ ...params }) -> void
+
+# Files
Types:
-- EvalTask
-- ListEvalTasksResponse
-- EvalTaskListResponse
+- DeleteFileResponse
+- File
+- ListFilesResponse
+- FileContentResponse
Methods:
-- client.evalTasks.retrieve(evalTaskId) -> EvalTask | null
-- client.evalTasks.list() -> EvalTaskListResponse
-- client.evalTasks.register({ ...params }) -> void
+- client.files.create({ ...params }) -> File
+- client.files.retrieve(fileId) -> File
+- client.files.list({ ...params }) -> ListFilesResponse
+- client.files.delete(fileId) -> DeleteFileResponse
+- client.files.content(fileId) -> unknown
diff --git a/bin/check-release-environment b/bin/check-release-environment
new file mode 100644
index 0000000..6b43775
--- /dev/null
+++ b/bin/check-release-environment
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+errors=()
+
+lenErrors=${#errors[@]}
+
+if [[ lenErrors -gt 0 ]]; then
+ echo -e "Found the following errors in the release environment:\n"
+
+ for error in "${errors[@]}"; do
+ echo -e "- $error\n"
+ done
+
+ exit 1
+fi
+
+echo "The environment is ready to push releases!"
+
diff --git a/bin/publish-npm b/bin/publish-npm
index 4c21181..fa2243d 100644
--- a/bin/publish-npm
+++ b/bin/publish-npm
@@ -4,19 +4,55 @@ set -eux
npm config set '//registry.npmjs.org/:_authToken' "$NPM_TOKEN"
-# Build the project
yarn build
-
-# Navigate to the dist directory
cd dist
-# Get the version from package.json
-VERSION="$(node -p "require('./package.json').version")"
+# Get package name and version from package.json
+PACKAGE_NAME="$(jq -r -e '.name' ./package.json)"
+VERSION="$(jq -r -e '.version' ./package.json)"
+
+# Get latest version from npm
+#
+# If the package doesn't exist, npm will return:
+# {
+# "error": {
+# "code": "E404",
+# "summary": "Unpublished on 2025-06-05T09:54:53.528Z",
+# "detail": "'the_package' is not in this registry..."
+# }
+# }
+NPM_INFO="$(npm view "$PACKAGE_NAME" version --json 2>/dev/null || true)"
+
+# Check if we got an E404 error
+if echo "$NPM_INFO" | jq -e '.error.code == "E404"' > /dev/null 2>&1; then
+ # Package doesn't exist yet, no last version
+ LAST_VERSION=""
+elif echo "$NPM_INFO" | jq -e '.error' > /dev/null 2>&1; then
+ # Report other errors
+ echo "ERROR: npm returned unexpected data:"
+ echo "$NPM_INFO"
+ exit 1
+else
+ # Success - get the version
+ LAST_VERSION=$(echo "$NPM_INFO" | jq -r '.') # strip quotes
+fi
-# Extract the pre-release tag if it exists
+# Check if current version is pre-release (e.g. alpha / beta / rc)
+CURRENT_IS_PRERELEASE=false
if [[ "$VERSION" =~ -([a-zA-Z]+) ]]; then
- # Extract the part before any dot in the pre-release identifier
- TAG="${BASH_REMATCH[1]}"
+ CURRENT_IS_PRERELEASE=true
+ CURRENT_TAG="${BASH_REMATCH[1]}"
+fi
+
+# Check if last version is a stable release
+LAST_IS_STABLE_RELEASE=true
+if [[ -z "$LAST_VERSION" || "$LAST_VERSION" =~ -([a-zA-Z]+) ]]; then
+ LAST_IS_STABLE_RELEASE=false
+fi
+
+# Use a corresponding alpha/beta tag if there already is a stable release and we're publishing a prerelease.
+if $CURRENT_IS_PRERELEASE && $LAST_IS_STABLE_RELEASE; then
+ TAG="$CURRENT_TAG"
else
TAG="latest"
fi
diff --git a/examples/agents.test.ts b/examples/agents.test.ts
deleted file mode 100644
index 9e4f6b9..0000000
--- a/examples/agents.test.ts
+++ /dev/null
@@ -1,67 +0,0 @@
-import { AgentConfig } from "llama-stack-client/resources/shared";
-import LlamaStackClient from 'llama-stack-client';
-
-
-describe('RAG Integration Tests', () => {
- let client: LlamaStackClient;
-
- beforeEach(() => {
- // Create a new client instance
- client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });
- });
-
- it('should create an agent and handle conversation successfully', async () => {
- // Get available models
- const models = await client.models.list();
- const llmModel = models.find(model => model.model_type === 'llm' && !model.identifier.includes('guard') && !model.identifier.includes('405'));
- expect(llmModel).toBeTruthy();
-
- // Create agent with configuration
- const agentConfig: AgentConfig = {
- model: llmModel!.identifier,
- instructions: 'You are a helpful assistant',
- sampling_params: {
- strategy: { type: 'top_p', temperature: 1.0, top_p: 0.9 },
- },
- toolgroups: [],
- tool_choice: 'auto',
- tool_prompt_format: 'json',
- input_shields: [],
- output_shields: [],
- enable_session_persistence: false,
- max_infer_iters: 10,
- };
-
- const agentResponse = await client.agents.create({ agent_config: agentConfig });
- expect(agentResponse.agent_id).toBeTruthy();
-
- const sessionResponse = await client.agents.session.create(agentResponse.agent_id, { session_name: 'test-session' });
- expect(sessionResponse.session_id).toBeTruthy();
-
- const userPrompts = [
- 'Hello',
- 'What is local time currently in California? Search the web for the answer.',
- ];
- for (const prompt of userPrompts) {
- const turnResponse = await client.agents.turn.create(
- agentResponse.agent_id,
- sessionResponse.session_id,
- {
- stream: true,
- messages: [
- {
- role: 'user',
- content: prompt,
- },
- ],
- }
- );
- // Test the response streaming
- for await (const chunk of turnResponse) {
- if (chunk.event.payload.event_type === 'turn_complete') {
- expect(chunk.event.payload.turn.output_message).toBeTruthy();
- }
- }
- }
- }, 30000);
-});
\ No newline at end of file
diff --git a/examples/agents.ts b/examples/agents.ts
deleted file mode 100755
index 340c7ba..0000000
--- a/examples/agents.ts
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env -S npm run tsn -T
-
-import { AgentConfig } from "llama-stack-client/resources/shared";
-
-import { LlamaStackClient, ClientOptions } from 'llama-stack-client';
-
-const options: ClientOptions = { baseURL: 'http://localhost:8321' };
-if (process.env["TAVILY_SEARCH_API_KEY"]) {
- const tavilyHeader = JSON.stringify({tavily_search_api_key: process.env["TAVILY_SEARCH_API_KEY"]});
- options.defaultHeaders = { 'X-LlamaStack-Provider-Data': tavilyHeader }
-}
-const client = new LlamaStackClient(options);
-
-async function main() {
- const availableModels = (await client.models.list())
- .filter((model: any) =>
- model.model_type === 'llm' &&
- !model.identifier.includes('guard') &&
- !model.identifier.includes('405')
- )
- .map((model: any) => model.identifier);
-
- if (availableModels.length === 0) {
- console.log('No available models. Exiting.');
- return;
- }
- const selectedModel = availableModels[0];
- console.log(`Using model: ${selectedModel}`);
-
- // Check for Tavily API key
- if (!process.env["TAVILY_SEARCH_API_KEY"]) {
- console.log('Warning: TAVILY_SEARCH_API_KEY is not set; will not use websearch tool.');
- }
-
- // Configure agent
- const agentConfig: AgentConfig = {
- model: selectedModel,
- instructions: 'You are a helpful assistant',
- sampling_params: {
- strategy: { type: 'top_p', temperature: 1.0, top_p: 0.9 },
- },
- toolgroups: process.env["TAVILY_SEARCH_API_KEY"] ? ['builtin::websearch'] : [],
- tool_choice: 'auto',
- tool_prompt_format: 'python_list',
- input_shields: [],
- output_shields: [],
- enable_session_persistence: false,
- max_infer_iters: 10,
- };
- console.log('Agent Configuration:', JSON.stringify(agentConfig, null, 2));
-
- const agentic_system_create_response = await client.agents.create({agent_config: agentConfig});
- const agent_id = agentic_system_create_response.agent_id;
- console.log(`Agent ID: ${agent_id}`);
- const userPrompts = [
- 'Hello',
- 'What is local time currently in California? Search the web for the answer.',
- ];
-
- const create_session_response = await client.agents.session.create(agent_id, {session_name: 'test-session'});
- const session_id = create_session_response.session_id;
- console.log(`Session ID: ${session_id}`);
-
- for (const prompt of userPrompts) {
- const response = await client.agents.turn.create(
- agent_id,
- session_id,
- {
- stream: true,
- messages: [
- {
- role: 'user',
- content: prompt,
- },
- ],
- },
- );
-
- // Log the response events
- for await (const chunk of response) {
- if (chunk.event.payload.event_type === 'turn_complete') {
- console.log(chunk.event.payload.turn.output_message);
- }
- }
- }
-
-}
-
-main();
diff --git a/examples/inference.test.ts b/examples/inference.test.ts
deleted file mode 100644
index 36fe51b..0000000
--- a/examples/inference.test.ts
+++ /dev/null
@@ -1,60 +0,0 @@
-import LlamaStackClient from 'llama-stack-client';
-
-describe('LlamaStack Client Integration Tests', () => {
- let client: LlamaStackClient;
- let availableModels: string[];
-
- beforeAll(async () => {
- client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });
-
- // Fetch available models once
- const models = await client.models.list();
- availableModels = models
- .filter((model: any) =>
- model.model_type === 'llm' &&
- !model.identifier.includes('guard') &&
- !model.identifier.includes('405')
- )
- .map((model: any) => model.identifier);
- });
-
- test('should list available models', async () => {
- const models = await client.models.list();
- expect(models).toBeDefined();
- expect(Array.isArray(models)).toBe(true);
- });
-
- test('should perform non-streaming chat completion', async () => {
- // Skip test if no models available
- if (availableModels.length === 0) {
- console.warn('Skipping test: No available models');
- return;
- }
- const chatCompletion = await client.inference.chatCompletion({
- messages: [{ content: 'Hello, how are you?', role: 'user' }],
- model_id: availableModels[0] as string,
- });
- expect(chatCompletion).toBeDefined();
- expect(chatCompletion.completion_message).toBeDefined();
- }, 30000);
-
- test('should perform streaming chat completion', async () => {
- // Skip test if no models available
- if (availableModels.length === 0) {
- console.warn('Skipping test: No available models');
- return;
- }
- const stream = await client.inference.chatCompletion({
- messages: [{ content: 'Hello, how are you?', role: 'user' }],
- model_id: availableModels[0] as string,
- stream: true,
- });
-
- const chunks: any[] = [];
- for await (const chunk of stream) {
- expect(chunk).toBeDefined();
- chunks.push(chunk);
- }
- expect(chunks.length).toBeGreaterThan(0);
- }, 30000);
-});
\ No newline at end of file
diff --git a/examples/inference.ts b/examples/inference.ts
deleted file mode 100755
index 2fa56b1..0000000
--- a/examples/inference.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env -S npm run tsn -T
-
-import LlamaStackClient from 'llama-stack-client';
-const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });
-
-async function main() {
- // list models
- const availableModels = (await client.models.list())
- .filter((model: any) =>
- model.model_type === 'llm' &&
- !model.identifier.includes('guard') &&
- !model.identifier.includes('405')
- )
- .map((model: any) => model.identifier);
-
- console.log(availableModels);
-
- if (availableModels.length === 0) {
- console.log('No available models. Exiting.');
- return;
- }
- const selectedModel = availableModels[0];
- console.log(`Using model: ${selectedModel}`);
-
- // non-streaming chat-completion
- const chatCompletion = await client.inference.chatCompletion({
- messages: [{ content: 'Hello, how are you?', role: 'user' }],
- model_id: selectedModel,
- });
- console.log(chatCompletion);
-
- // streaming chat-completion
- const stream = await client.inference.chatCompletion({
- messages: [{ content: 'Hello, how are you?', role: 'user' }],
- model_id: selectedModel,
- stream: true,
- });
- for await (const chunk of stream) {
- if (chunk.event.delta.type === 'text') {
- process.stdout.write(chunk.event.delta.text || '');
- }
- }
- process.stdout.write('\n');
-}
-
-main();
\ No newline at end of file
diff --git a/package.json b/package.json
index a84609a..19cb1a3 100644
--- a/package.json
+++ b/package.json
@@ -1,20 +1,20 @@
{
"name": "llama-stack-client",
- "version": "0.2.13",
+ "version": "0.2.17",
"description": "The official TypeScript library for the Llama Stack Client API",
"author": "Llama Stack Client ",
"types": "dist/index.d.ts",
"main": "dist/index.js",
"type": "commonjs",
- "repository": "github:meta-llama/llama-stack-client-node",
- "license": "Apache-2.0",
+ "repository": "github:llamastack/llama-stack-client-typescript",
+ "license": "MIT",
"packageManager": "yarn@1.22.22",
"files": [
"**/*"
],
"private": false,
"scripts": {
- "test": "jest",
+ "test": "./scripts/test",
"build": "./scripts/build",
"prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1",
"format": "prettier --write --cache --cache-strategy metadata . !dist",
@@ -30,8 +30,7 @@
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
- "node-fetch": "^2.6.7",
- "tsx": "^4.19.2"
+ "node-fetch": "^2.6.7"
},
"devDependencies": {
"@swc/core": "^1.3.102",
@@ -59,10 +58,6 @@
"./shims/web.js",
"./shims/web.mjs"
],
- "imports": {
- "llama-stack-client": ".",
- "llama-stack-client/*": "./src/*"
- },
"exports": {
"./_shims/auto/*": {
"deno": {
diff --git a/release-please-config.json b/release-please-config.json
new file mode 100644
index 0000000..624ed99
--- /dev/null
+++ b/release-please-config.json
@@ -0,0 +1,67 @@
+{
+ "packages": {
+ ".": {}
+ },
+ "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json",
+ "include-v-in-tag": true,
+ "include-component-in-tag": false,
+ "versioning": "prerelease",
+ "prerelease": true,
+ "bump-minor-pre-major": true,
+ "bump-patch-for-minor-pre-major": false,
+ "pull-request-header": "Automated Release PR",
+ "pull-request-title-pattern": "release: ${version}",
+ "changelog-sections": [
+ {
+ "type": "feat",
+ "section": "Features"
+ },
+ {
+ "type": "fix",
+ "section": "Bug Fixes"
+ },
+ {
+ "type": "perf",
+ "section": "Performance Improvements"
+ },
+ {
+ "type": "revert",
+ "section": "Reverts"
+ },
+ {
+ "type": "chore",
+ "section": "Chores"
+ },
+ {
+ "type": "docs",
+ "section": "Documentation"
+ },
+ {
+ "type": "style",
+ "section": "Styles"
+ },
+ {
+ "type": "refactor",
+ "section": "Refactors"
+ },
+ {
+ "type": "test",
+ "section": "Tests",
+ "hidden": true
+ },
+ {
+ "type": "build",
+ "section": "Build System"
+ },
+ {
+ "type": "ci",
+ "section": "Continuous Integration",
+ "hidden": true
+ }
+ ],
+ "release-type": "node",
+ "extra-files": [
+ "src/version.ts",
+ "README.md"
+ ]
+}
diff --git a/scripts/bootstrap b/scripts/bootstrap
index 05dd47a..0af58e2 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
cd "$(dirname "$0")/.."
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then
brew bundle check >/dev/null 2>&1 || {
echo "==> Installing Homebrew dependencies…"
brew bundle
diff --git a/scripts/build b/scripts/build
index ca9524d..3bf4e0f 100755
--- a/scripts/build
+++ b/scripts/build
@@ -28,7 +28,7 @@ fi
node scripts/utils/make-dist-package-json.cjs > dist/package.json
# build to .js/.mjs/.d.ts files
-npm exec tsc-multi
+./node_modules/.bin/tsc-multi
# copy over handwritten .js/.mjs/.d.ts files
cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims
cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
new file mode 100755
index 0000000..34d5407
--- /dev/null
+++ b/scripts/utils/upload-artifact.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+set -exuo pipefail
+
+RESPONSE=$(curl -X POST "$URL" \
+ -H "Authorization: Bearer $AUTH" \
+ -H "Content-Type: application/json")
+
+SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url')
+
+if [[ "$SIGNED_URL" == "null" ]]; then
+ echo -e "\033[31mFailed to get signed URL.\033[0m"
+ exit 1
+fi
+
+UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \
+ -H "Content-Type: application/gzip" \
+ --data-binary @- "$SIGNED_URL" 2>&1)
+
+if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
+ echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
+ echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/llama-stack-client-node/$SHA'\033[0m"
+else
+ echo -e "\033[31mFailed to upload artifact.\033[0m"
+ exit 1
+fi
diff --git a/src/_shims/index-deno.ts b/src/_shims/index-deno.ts
index 2ce3036..71182fb 100644
--- a/src/_shims/index-deno.ts
+++ b/src/_shims/index-deno.ts
@@ -79,7 +79,7 @@ export function getDefaultAgent(url: string) {
}
export function fileFromPath() {
throw new Error(
- 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/stainless-sdks/llama-stack-node#file-uploads',
+ 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/llamastack/llama-stack-client-typescript#file-uploads',
);
}
diff --git a/src/_shims/web-runtime.ts b/src/_shims/web-runtime.ts
index dd95444..8237f0e 100644
--- a/src/_shims/web-runtime.ts
+++ b/src/_shims/web-runtime.ts
@@ -95,7 +95,7 @@ export function getRuntime({ manuallyImported }: { manuallyImported?: boolean }
getDefaultAgent: (url: string) => undefined,
fileFromPath: () => {
throw new Error(
- 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/stainless-sdks/llama-stack-node#file-uploads',
+ 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/llamastack/llama-stack-client-typescript#file-uploads',
);
},
isFsReadStream: (value: any) => false,
diff --git a/src/core.ts b/src/core.ts
index 6cd9d53..19986c7 100644
--- a/src/core.ts
+++ b/src/core.ts
@@ -231,7 +231,7 @@ export abstract class APIClient {
protected defaultHeaders(opts: FinalRequestOptions): Headers {
return {
Accept: 'application/json',
- 'Content-Type': 'application/json',
+ ...(['head', 'get'].includes(opts.method) ? {} : { 'Content-Type': 'application/json' }),
'User-Agent': this.getUserAgent(),
...getPlatformHeaders(),
...this.authHeaders(opts),
@@ -313,10 +313,10 @@ export abstract class APIClient {
return null;
}
- buildRequest(
+ async buildRequest(
inputOptions: FinalRequestOptions,
{ retryCount = 0 }: { retryCount?: number } = {},
- ): { req: RequestInit; url: string; timeout: number } {
+ ): Promise<{ req: RequestInit; url: string; timeout: number }> {
const options = { ...inputOptions };
const { method, path, query, defaultBaseURL, headers: headers = {} } = options;
@@ -464,7 +464,9 @@ export abstract class APIClient {
await this.prepareOptions(options);
- const { req, url, timeout } = this.buildRequest(options, { retryCount: maxRetries - retriesRemaining });
+ const { req, url, timeout } = await this.buildRequest(options, {
+ retryCount: maxRetries - retriesRemaining,
+ });
await this.prepareRequest(req, { url, options });
diff --git a/src/index.ts b/src/index.ts
index 6c7f39c..042239d 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -23,6 +23,7 @@ import {
Completions,
} from './resources/completions';
import {
+ DatasetAppendrowsParams,
DatasetIterrowsParams,
DatasetIterrowsResponse,
DatasetListResponse,
@@ -67,6 +68,7 @@ import {
ModelRegisterParams,
Models,
} from './resources/models';
+import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations';
import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers';
import { ListRoutesResponse, RouteListResponse, Routes } from './resources/routes';
import { RunShieldResponse, Safety, SafetyRunShieldParams } from './resources/safety';
@@ -139,6 +141,9 @@ import {
import {
AgentCreateParams,
AgentCreateResponse,
+ AgentListParams,
+ AgentListResponse,
+ AgentRetrieveResponse,
Agents,
InferenceStep,
MemoryRetrievalStep,
@@ -198,14 +203,14 @@ import {
export interface ClientOptions {
/**
- * Defaults to process.env['LLAMA_STACK_API_KEY'].
+ * Defaults to process.env['LLAMA_STACK_CLIENT_API_KEY'].
*/
apiKey?: string | null | undefined;
/**
* Override the default base URL for the API, e.g., "https://api.example.com/v2/"
*
- * Defaults to process.env['LLAMA_STACK_BASE_URL'].
+ * Defaults to process.env['LLAMA_STACK_CLIENT_BASE_URL'].
*/
baseURL?: string | null | undefined;
@@ -215,6 +220,8 @@ export interface ClientOptions {
*
* Note that request timeouts are retried by default, so in a worst-case scenario you may wait
* much longer than this timeout before the promise succeeds or fails.
+ *
+ * @unit milliseconds
*/
timeout?: number | undefined;
@@ -270,8 +277,8 @@ export class LlamaStackClient extends Core.APIClient {
/**
* API Client for interfacing with the Llama Stack Client API.
*
- * @param {string | null | undefined} [opts.apiKey=process.env['LLAMA_STACK_API_KEY'] ?? null]
- * @param {string} [opts.baseURL=process.env['LLAMA_STACK_BASE_URL'] ?? http://any-hosted-llama-stack.com] - Override the default base URL for the API.
+ * @param {string | null | undefined} [opts.apiKey=process.env['LLAMA_STACK_CLIENT_API_KEY'] ?? null]
+ * @param {string} [opts.baseURL=process.env['LLAMA_STACK_CLIENT_BASE_URL'] ?? http://any-hosted-llama-stack.com] - Override the default base URL for the API.
* @param {number} [opts.timeout=1 minute] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
* @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
* @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
@@ -280,8 +287,8 @@ export class LlamaStackClient extends Core.APIClient {
* @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
*/
constructor({
- baseURL = Core.readEnv('LLAMA_STACK_BASE_URL'),
- apiKey = Core.readEnv('LLAMA_STACK_API_KEY') ?? null,
+ baseURL = Core.readEnv('LLAMA_STACK_CLIENT_BASE_URL'),
+ apiKey = Core.readEnv('LLAMA_STACK_CLIENT_API_KEY') ?? null,
...opts
}: ClientOptions = {}) {
const options: ClientOptions = {
@@ -323,6 +330,7 @@ export class LlamaStackClient extends Core.APIClient {
postTraining: API.PostTraining = new API.PostTraining(this);
providers: API.Providers = new API.Providers(this);
routes: API.Routes = new API.Routes(this);
+ moderations: API.Moderations = new API.Moderations(this);
safety: API.Safety = new API.Safety(this);
shields: API.Shields = new API.Shields(this);
syntheticDataGeneration: API.SyntheticDataGeneration = new API.SyntheticDataGeneration(this);
@@ -401,6 +409,7 @@ LlamaStackClient.Models = Models;
LlamaStackClient.PostTraining = PostTraining;
LlamaStackClient.Providers = Providers;
LlamaStackClient.Routes = Routes;
+LlamaStackClient.Moderations = Moderations;
LlamaStackClient.Safety = Safety;
LlamaStackClient.Shields = Shields;
LlamaStackClient.SyntheticDataGeneration = SyntheticDataGeneration;
@@ -462,7 +471,10 @@ export declare namespace LlamaStackClient {
type ToolExecutionStep as ToolExecutionStep,
type ToolResponse as ToolResponse,
type AgentCreateResponse as AgentCreateResponse,
+ type AgentRetrieveResponse as AgentRetrieveResponse,
+ type AgentListResponse as AgentListResponse,
type AgentCreateParams as AgentCreateParams,
+ type AgentListParams as AgentListParams,
};
export {
@@ -472,6 +484,7 @@ export declare namespace LlamaStackClient {
type DatasetListResponse as DatasetListResponse,
type DatasetIterrowsResponse as DatasetIterrowsResponse,
type DatasetRegisterResponse as DatasetRegisterResponse,
+ type DatasetAppendrowsParams as DatasetAppendrowsParams,
type DatasetIterrowsParams as DatasetIterrowsParams,
type DatasetRegisterParams as DatasetRegisterParams,
};
@@ -587,6 +600,12 @@ export declare namespace LlamaStackClient {
type RouteListResponse as RouteListResponse,
};
+ export {
+ Moderations as Moderations,
+ type CreateResponse as CreateResponse,
+ type ModerationCreateParams as ModerationCreateParams,
+ };
+
export {
Safety as Safety,
type RunShieldResponse as RunShieldResponse,
diff --git a/src/resources/agents/agents.ts b/src/resources/agents/agents.ts
index 01e80b4..35a4d62 100644
--- a/src/resources/agents/agents.ts
+++ b/src/resources/agents/agents.ts
@@ -1,6 +1,7 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
import * as Core from '../../core';
import * as Shared from '../shared';
import * as SessionAPI from './session';
@@ -8,6 +9,8 @@ import {
Session,
SessionCreateParams,
SessionCreateResponse,
+ SessionListParams,
+ SessionListResponse,
SessionResource,
SessionRetrieveParams,
} from './session';
@@ -40,6 +43,28 @@ export class Agents extends APIResource {
return this._client.post('/v1/agents', { body, ...options });
}
+ /**
+ * Describe an agent by its ID.
+ */
+ retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise<AgentRetrieveResponse> {
+ return this._client.get(`/v1/agents/${agentId}`, options);
+ }
+
+ /**
+ * List all agents.
+ */
+ list(query?: AgentListParams, options?: Core.RequestOptions): Core.APIPromise<AgentListResponse>;
+ list(options?: Core.RequestOptions): Core.APIPromise<AgentListResponse>;
+ list(
+ query: AgentListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<AgentListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this._client.get('/v1/agents', { query, ...options });
+ }
+
/**
* Delete an agent by its ID and its associated sessions and turns.
*/
@@ -201,23 +226,81 @@ export interface ToolExecutionStep {
started_at?: string;
}
+/**
+ * Response from a tool invocation.
+ */
export interface ToolResponse {
+ /**
+ * Unique identifier for the tool call this response is for
+ */
call_id: string;
/**
- * A image content item
+ * The response content from the tool
*/
content: Shared.InterleavedContent;
+ /**
+ * Name of the tool that was invoked
+ */
tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
- metadata?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Additional metadata about the tool response
+ */
+ metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
}
+/**
+ * Response returned when creating a new agent.
+ */
export interface AgentCreateResponse {
+ /**
+ * Unique identifier for the created agent
+ */
agent_id: string;
}
+/**
+ * An agent instance with configuration and metadata.
+ */
+export interface AgentRetrieveResponse {
+ /**
+ * Configuration settings for the agent
+ */
+ agent_config: Shared.AgentConfig;
+
+ /**
+ * Unique identifier for the agent
+ */
+ agent_id: string;
+
+ /**
+ * Timestamp when the agent was created
+ */
+ created_at: string;
+}
+
+/**
+ * A generic paginated response that follows a simple format.
+ */
+export interface AgentListResponse {
+ /**
+ * The list of items for the current page
+ */
+ data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
+
+ /**
+ * Whether there are more items available after this set
+ */
+ has_more: boolean;
+
+ /**
+ * The URL for accessing this list
+ */
+ url?: string;
+}
+
export interface AgentCreateParams {
/**
* The configuration for the agent.
@@ -225,6 +308,18 @@ export interface AgentCreateParams {
agent_config: Shared.AgentConfig;
}
+export interface AgentListParams {
+ /**
+ * The number of agents to return.
+ */
+ limit?: number;
+
+ /**
+ * The index to start the pagination from.
+ */
+ start_index?: number;
+}
+
Agents.SessionResource = SessionResource;
Agents.Steps = Steps;
Agents.TurnResource = TurnResource;
@@ -237,15 +332,20 @@ export declare namespace Agents {
type ToolExecutionStep as ToolExecutionStep,
type ToolResponse as ToolResponse,
type AgentCreateResponse as AgentCreateResponse,
+ type AgentRetrieveResponse as AgentRetrieveResponse,
+ type AgentListResponse as AgentListResponse,
type AgentCreateParams as AgentCreateParams,
+ type AgentListParams as AgentListParams,
};
export {
SessionResource as SessionResource,
type Session as Session,
type SessionCreateResponse as SessionCreateResponse,
+ type SessionListResponse as SessionListResponse,
type SessionCreateParams as SessionCreateParams,
type SessionRetrieveParams as SessionRetrieveParams,
+ type SessionListParams as SessionListParams,
};
export { Steps as Steps, type StepRetrieveResponse as StepRetrieveResponse };
diff --git a/src/resources/agents/index.ts b/src/resources/agents/index.ts
index 5cc54ca..88a44bf 100644
--- a/src/resources/agents/index.ts
+++ b/src/resources/agents/index.ts
@@ -8,14 +8,19 @@ export {
type ToolExecutionStep,
type ToolResponse,
type AgentCreateResponse,
+ type AgentRetrieveResponse,
+ type AgentListResponse,
type AgentCreateParams,
+ type AgentListParams,
} from './agents';
export {
SessionResource,
type Session,
type SessionCreateResponse,
+ type SessionListResponse,
type SessionCreateParams,
type SessionRetrieveParams,
+ type SessionListParams,
} from './session';
export { Steps, type StepRetrieveResponse } from './steps';
export {
diff --git a/src/resources/agents/session.ts b/src/resources/agents/session.ts
index 304c028..35c8511 100644
--- a/src/resources/agents/session.ts
+++ b/src/resources/agents/session.ts
@@ -39,6 +39,26 @@ export class SessionResource extends APIResource {
return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options });
}
+ /**
+ * List all session(s) of a given agent.
+ */
+ list(
+ agentId: string,
+ query?: SessionListParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<SessionListResponse>;
+ list(agentId: string, options?: Core.RequestOptions): Core.APIPromise<SessionListResponse>;
+ list(
+ agentId: string,
+ query: SessionListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<SessionListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list(agentId, {}, query);
+ }
+ return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options });
+ }
+
/**
* Delete an agent session by its ID and its associated turns.
*/
@@ -54,19 +74,57 @@ export class SessionResource extends APIResource {
* A single session of an interaction with an Agentic System.
*/
export interface Session {
+ /**
+ * Unique identifier for the conversation session
+ */
session_id: string;
+ /**
+ * Human-readable name for the session
+ */
session_name: string;
+ /**
+ * Timestamp when the session was created
+ */
started_at: string;
+ /**
+ * List of all turns that have occurred in this session
+ */
turns: Array<Turn>;
}
+/**
+ * Response returned when creating a new agent session.
+ */
export interface SessionCreateResponse {
+ /**
+ * Unique identifier for the created session
+ */
session_id: string;
}
+/**
+ * A generic paginated response that follows a simple format.
+ */
+export interface SessionListResponse {
+ /**
+ * The list of items for the current page
+ */
+ data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
+
+ /**
+ * Whether there are more items available after this set
+ */
+ has_more: boolean;
+
+ /**
+ * The URL for accessing this list
+ */
+ url?: string;
+}
+
export interface SessionCreateParams {
/**
* The name of the session to create.
@@ -81,11 +139,25 @@ export interface SessionRetrieveParams {
turn_ids?: Array<string>;
}
+export interface SessionListParams {
+ /**
+ * The number of sessions to return.
+ */
+ limit?: number;
+
+ /**
+ * The index to start the pagination from.
+ */
+ start_index?: number;
+}
+
export declare namespace SessionResource {
export {
type Session as Session,
type SessionCreateResponse as SessionCreateResponse,
+ type SessionListResponse as SessionListResponse,
type SessionCreateParams as SessionCreateParams,
type SessionRetrieveParams as SessionRetrieveParams,
+ type SessionListParams as SessionListParams,
};
}
diff --git a/src/resources/agents/steps.ts b/src/resources/agents/steps.ts
index 1abf04b..8d2d821 100644
--- a/src/resources/agents/steps.ts
+++ b/src/resources/agents/steps.ts
@@ -22,9 +22,12 @@ export class Steps extends APIResource {
}
}
+/**
+ * Response containing details of a specific agent step.
+ */
export interface StepRetrieveResponse {
/**
- * An inference step in an agent turn.
+ * The complete step data and execution details
*/
step:
| AgentsAPI.InferenceStep
diff --git a/src/resources/agents/turn.ts b/src/resources/agents/turn.ts
index de3d226..0273625 100644
--- a/src/resources/agents/turn.ts
+++ b/src/resources/agents/turn.ts
@@ -98,9 +98,12 @@ export class TurnResource extends APIResource {
}
/**
- * streamed agent turn completion response.
+ * Streamed agent turn completion response.
*/
export interface AgentTurnResponseStreamChunk {
+ /**
+ * Individual event in the agent turn response stream
+ */
event: TurnResponseEvent;
}
@@ -108,17 +111,29 @@ export interface AgentTurnResponseStreamChunk {
* A single turn in an interaction with an Agentic System.
*/
export interface Turn {
+ /**
+ * List of messages that initiated this turn
+ */
input_messages: Array<Shared.UserMessage | Shared.ToolResponseMessage>;
/**
- * A message containing the model's (assistant) response in a chat conversation.
+ * The model's generated response containing content and metadata
*/
output_message: Shared.CompletionMessage;
+ /**
+ * Unique identifier for the conversation session
+ */
session_id: string;
+ /**
+ * Timestamp when the turn began
+ */
started_at: string;
+ /**
+ * Ordered list of processing steps executed during this turn
+ */
steps: Array<
| AgentsAPI.InferenceStep
| AgentsAPI.ToolExecutionStep
@@ -126,10 +141,19 @@ export interface Turn {
| AgentsAPI.MemoryRetrievalStep
>;
+ /**
+ * Unique identifier for the turn within a session
+ */
turn_id: string;
+ /**
+ * (Optional) Timestamp when the turn finished, if completed
+ */
completed_at?: string;
+ /**
+ * (Optional) Files or media attached to the agent's response
+ */
output_attachments?: Array<Turn.OutputAttachment>;
}
@@ -193,6 +217,9 @@ export namespace Turn {
* Note that URL could have length limits.
*/
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -213,16 +240,31 @@ export namespace Turn {
type: 'text';
}
+ /**
+ * A URL reference to external content.
+ */
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
}
+/**
+ * An event in an agent turn response stream.
+ */
export interface TurnResponseEvent {
+ /**
+ * Event-specific payload containing event data
+ */
payload: TurnResponseEventPayload;
}
+/**
+ * Payload for step start events in agent turn responses.
+ */
export type TurnResponseEventPayload =
| TurnResponseEventPayload.AgentTurnResponseStepStartPayload
| TurnResponseEventPayload.AgentTurnResponseStepProgressPayload
@@ -232,37 +274,67 @@ export type TurnResponseEventPayload =
| TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload;
export namespace TurnResponseEventPayload {
+ /**
+ * Payload for step start events in agent turn responses.
+ */
export interface AgentTurnResponseStepStartPayload {
+ /**
+ * Type of event being reported
+ */
event_type: 'step_start';
+ /**
+ * Unique identifier for the step within a turn
+ */
step_id: string;
/**
- * Type of the step in an agent turn.
+ * Type of step being executed
*/
step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
- metadata?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Additional metadata for the step
+ */
+ metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
}
+ /**
+ * Payload for step progress events in agent turn responses.
+ */
export interface AgentTurnResponseStepProgressPayload {
+ /**
+ * Incremental content changes during step execution
+ */
delta: Shared.ContentDelta;
+ /**
+ * Type of event being reported
+ */
event_type: 'step_progress';
+ /**
+ * Unique identifier for the step within a turn
+ */
step_id: string;
/**
- * Type of the step in an agent turn.
+ * Type of step being executed
*/
step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
}
+ /**
+ * Payload for step completion events in agent turn responses.
+ */
export interface AgentTurnResponseStepCompletePayload {
+ /**
+ * Type of event being reported
+ */
event_type: 'step_complete';
/**
- * An inference step in an agent turn.
+ * Complete details of the executed step
*/
step_details:
| AgentsAPI.InferenceStep
@@ -270,34 +342,58 @@ export namespace TurnResponseEventPayload {
| AgentsAPI.ShieldCallStep
| AgentsAPI.MemoryRetrievalStep;
+ /**
+ * Unique identifier for the step within a turn
+ */
step_id: string;
/**
- * Type of the step in an agent turn.
+ * Type of step being executed
*/
step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
}
+ /**
+ * Payload for turn start events in agent turn responses.
+ */
export interface AgentTurnResponseTurnStartPayload {
+ /**
+ * Type of event being reported
+ */
event_type: 'turn_start';
+ /**
+ * Unique identifier for the turn within a session
+ */
turn_id: string;
}
+ /**
+ * Payload for turn completion events in agent turn responses.
+ */
export interface AgentTurnResponseTurnCompletePayload {
+ /**
+ * Type of event being reported
+ */
event_type: 'turn_complete';
/**
- * A single turn in an interaction with an Agentic System.
+ * Complete turn data including all steps and results
*/
turn: TurnAPI.Turn;
}
+ /**
+ * Payload for turn awaiting input events in agent turn responses.
+ */
export interface AgentTurnResponseTurnAwaitingInputPayload {
+ /**
+ * Type of event being reported
+ */
event_type: 'turn_awaiting_input';
/**
- * A single turn in an interaction with an Agentic System.
+ * Turn data when waiting for external tool responses
*/
turn: TurnAPI.Turn;
}
@@ -395,6 +491,9 @@ export namespace TurnCreateParams {
* Note that URL could have length limits.
*/
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -415,7 +514,13 @@ export namespace TurnCreateParams {
type: 'text';
}
+ /**
+ * A URL reference to external content.
+ */
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -453,7 +558,7 @@ export namespace TurnCreateParams {
}
export interface AgentToolGroupWithArgs {
- args: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ args: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
name: string;
}
diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts
index 35c8fe0..b6b8363 100644
--- a/src/resources/benchmarks.ts
+++ b/src/resources/benchmarks.ts
@@ -32,17 +32,32 @@ export class Benchmarks extends APIResource {
}
}
+/**
+ * A benchmark resource for evaluating model performance.
+ */
export interface Benchmark {
+ /**
+ * Identifier of the dataset to use for the benchmark evaluation
+ */
dataset_id: string;
identifier: string;
- metadata: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * Metadata for this evaluation task
+ */
+ metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
provider_id: string;
+ /**
+ * List of scoring function identifiers to apply during evaluation
+ */
scoring_functions: Array<string>;
+ /**
+ * The resource type, always benchmark
+ */
type: 'benchmark';
provider_resource_id?: string;
@@ -73,7 +88,7 @@ export interface BenchmarkRegisterParams {
/**
* The metadata to use for the benchmark.
*/
- metadata?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* The ID of the provider benchmark to use for the benchmark.
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 3c693ee..a38445d 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -100,20 +100,44 @@ export namespace ChatCompletionChunk {
}
export namespace Delta {
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 5870c59..6331b0a 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -141,6 +141,7 @@ export namespace CompletionCreateResponse {
| Array<
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
>;
/**
@@ -155,25 +156,68 @@ export namespace CompletionCreateResponse {
}
export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+ /**
+ * Must be "image_url" to identify this as image content
+ */
type: 'image_url';
}
export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
url: string;
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
detail?: string;
}
}
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
}
/**
@@ -185,12 +229,7 @@ export namespace CompletionCreateResponse {
* they are concatenated. The underlying Llama Stack code may also add other system
* messages (for example, for formatting tool definitions).
*/
- content:
- | string
- | Array<
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
/**
* Must be "system" to identify this as a system message
@@ -204,25 +243,20 @@ export namespace CompletionCreateResponse {
}
export namespace OpenAISystemMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -238,12 +272,7 @@ export namespace CompletionCreateResponse {
/**
* The content of the model's response
*/
- content?:
- | string
- | Array<
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
/**
* (Optional) The name of the assistant message participant.
@@ -257,40 +286,59 @@ export namespace CompletionCreateResponse {
}
export namespace OpenAIAssistantMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
-
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
@@ -304,12 +352,7 @@ export namespace CompletionCreateResponse {
/**
* The response content from the tool
*/
- content:
- | string
- | Array<
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
/**
* Must be "tool" to identify this as a tool response
@@ -323,25 +366,20 @@ export namespace CompletionCreateResponse {
}
export namespace OpenAIToolMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -351,12 +389,7 @@ export namespace CompletionCreateResponse {
/**
* The content of the developer message
*/
- content:
- | string
- | Array<
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
/**
* Must be "developer" to identify this as a developer message
@@ -370,25 +403,20 @@ export namespace CompletionCreateResponse {
}
export namespace OpenAIDeveloperMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -546,6 +574,7 @@ export namespace CompletionRetrieveResponse {
| Array<
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
>;
/**
@@ -560,25 +589,68 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+ /**
+ * Must be "image_url" to identify this as image content
+ */
type: 'image_url';
}
export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
url: string;
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
detail?: string;
}
}
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
}
/**
@@ -590,12 +662,7 @@ export namespace CompletionRetrieveResponse {
* they are concatenated. The underlying Llama Stack code may also add other system
* messages (for example, for formatting tool definitions).
*/
- content:
- | string
- | Array<
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
/**
* Must be "system" to identify this as a system message
@@ -609,25 +676,20 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAISystemMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -643,12 +705,7 @@ export namespace CompletionRetrieveResponse {
/**
* The content of the model's response
*/
- content?:
- | string
- | Array<
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
/**
* (Optional) The name of the assistant message participant.
@@ -662,40 +719,59 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIAssistantMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
-
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
@@ -709,12 +785,7 @@ export namespace CompletionRetrieveResponse {
/**
* The response content from the tool
*/
- content:
- | string
- | Array<
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
/**
* Must be "tool" to identify this as a tool response
@@ -728,25 +799,20 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIToolMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -756,12 +822,7 @@ export namespace CompletionRetrieveResponse {
/**
* The content of the developer message
*/
- content:
- | string
- | Array<
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
/**
* Must be "developer" to identify this as a developer message
@@ -775,24 +836,19 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIDeveloperMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
- text: string;
-
- type: 'text';
- }
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
- detail?: string;
- }
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
}
}
@@ -882,6 +938,7 @@ export namespace CompletionRetrieveResponse {
| Array<
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
>;
/**
@@ -896,25 +953,68 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+ /**
+ * Must be "image_url" to identify this as image content
+ */
type: 'image_url';
}
export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
url: string;
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
detail?: string;
}
}
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
}
/**
@@ -926,12 +1026,7 @@ export namespace CompletionRetrieveResponse {
* they are concatenated. The underlying Llama Stack code may also add other system
* messages (for example, for formatting tool definitions).
*/
- content:
- | string
- | Array<
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
/**
* Must be "system" to identify this as a system message
@@ -945,25 +1040,20 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAISystemMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -979,12 +1069,7 @@ export namespace CompletionRetrieveResponse {
/**
* The content of the model's response
*/
- content?:
- | string
- | Array<
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
/**
* (Optional) The name of the assistant message participant.
@@ -998,40 +1083,59 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIAssistantMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
-
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
@@ -1045,12 +1149,7 @@ export namespace CompletionRetrieveResponse {
/**
* The response content from the tool
*/
- content:
- | string
- | Array<
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
/**
* Must be "tool" to identify this as a tool response
@@ -1064,25 +1163,20 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIToolMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -1092,12 +1186,7 @@ export namespace CompletionRetrieveResponse {
/**
* The content of the developer message
*/
- content:
- | string
- | Array<
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
/**
* Must be "developer" to identify this as a developer message
@@ -1111,37 +1200,50 @@ export namespace CompletionRetrieveResponse {
}
export namespace OpenAIDeveloperMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
}
+/**
+ * Response from listing OpenAI-compatible chat completions.
+ */
export interface CompletionListResponse {
+ /**
+ * List of chat completion objects with their input messages
+ */
+ data: Array<CompletionListResponse.Data>;
+ /**
+ * ID of the first completion in this list
+ */
first_id: string;
+ /**
+ * Whether there are more completions available beyond this list
+ */
has_more: boolean;
+ /**
+ * ID of the last completion in this list
+ */
last_id: string;
+ /**
+ * Must be "list" to identify this as a list response
+ */
object: 'list';
}
@@ -1225,6 +1327,7 @@ export namespace CompletionListResponse {
| Array<
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
>;
/**
@@ -1239,25 +1342,68 @@ export namespace CompletionListResponse {
}
export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+ /**
+ * Must be "image_url" to identify this as image content
+ */
type: 'image_url';
}
export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
url: string;
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
detail?: string;
}
}
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
}
/**
@@ -1269,12 +1415,7 @@ export namespace CompletionListResponse {
* they are concatenated. The underlying Llama Stack code may also add other system
* messages (for example, for formatting tool definitions).
*/
- content:
- | string
- | Array<
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
/**
* Must be "system" to identify this as a system message
@@ -1288,25 +1429,20 @@ export namespace CompletionListResponse {
}
export namespace OpenAISystemMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -1322,12 +1458,7 @@ export namespace CompletionListResponse {
/**
* The content of the model's response
*/
- content?:
- | string
- | Array<
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
/**
* (Optional) The name of the assistant message participant.
@@ -1341,40 +1472,59 @@ export namespace CompletionListResponse {
}
export namespace OpenAIAssistantMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
-
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
@@ -1388,12 +1538,7 @@ export namespace CompletionListResponse {
/**
* The response content from the tool
*/
- content:
- | string
- | Array<
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
/**
* Must be "tool" to identify this as a tool response
@@ -1407,25 +1552,20 @@ export namespace CompletionListResponse {
}
export namespace OpenAIToolMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -1435,12 +1575,7 @@ export namespace CompletionListResponse {
/**
* The content of the developer message
*/
- content:
- | string
- | Array<
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
/**
* Must be "developer" to identify this as a developer message
@@ -1454,25 +1589,20 @@ export namespace CompletionListResponse {
}
export namespace OpenAIDeveloperMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -1561,6 +1691,7 @@ export namespace CompletionListResponse {
| Array<
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
>;
/**
@@ -1575,25 +1706,68 @@ export namespace CompletionListResponse {
}
export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+ /**
+ * Must be "image_url" to identify this as image content
+ */
type: 'image_url';
}
export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
url: string;
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
detail?: string;
}
}
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
}
/**
@@ -1605,12 +1779,7 @@ export namespace CompletionListResponse {
* they are concatenated. The underlying Llama Stack code may also add other system
* messages (for example, for formatting tool definitions).
*/
- content:
- | string
- | Array<
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
/**
* Must be "system" to identify this as a system message
@@ -1624,25 +1793,20 @@ export namespace CompletionListResponse {
}
export namespace OpenAISystemMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -1658,12 +1822,7 @@ export namespace CompletionListResponse {
/**
* The content of the model's response
*/
- content?:
- | string
- | Array<
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
/**
* (Optional) The name of the assistant message participant.
@@ -1677,40 +1836,59 @@ export namespace CompletionListResponse {
}
export namespace OpenAIAssistantMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
-
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
@@ -1724,12 +1902,7 @@ export namespace CompletionListResponse {
/**
* The response content from the tool
*/
- content:
- | string
- | Array<
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
/**
* Must be "tool" to identify this as a tool response
@@ -1743,25 +1916,20 @@ export namespace CompletionListResponse {
}
export namespace OpenAIToolMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -1771,12 +1939,7 @@ export namespace CompletionListResponse {
/**
* The content of the developer message
*/
- content:
- | string
- | Array<
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
/**
* Must be "developer" to identify this as a developer message
@@ -1790,25 +1953,20 @@ export namespace CompletionListResponse {
}
export namespace OpenAIDeveloperMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
}
}
@@ -1841,17 +1999,17 @@ export interface CompletionCreateParamsBase {
/**
* (Optional) The function call to use.
*/
- function_call?: string | Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ function_call?: string | { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* (Optional) List of functions to use.
*/
- functions?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ functions?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
/**
* (Optional) The logit bias to use.
*/
- logit_bias?: Record<string, number>;
+ logit_bias?: { [key: string]: number };
/**
* (Optional) The log probabilities to use.
@@ -1909,7 +2067,7 @@ export interface CompletionCreateParamsBase {
/**
* (Optional) The stream options to use.
*/
- stream_options?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ stream_options?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* (Optional) The temperature to use.
@@ -1919,12 +2077,12 @@ export interface CompletionCreateParamsBase {
/**
* (Optional) The tool choice to use.
*/
- tool_choice?: string | Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ tool_choice?: string | { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* (Optional) The tools to use.
*/
- tools?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ tools?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
/**
* (Optional) The top log probabilities to use.
@@ -1955,6 +2113,7 @@ export namespace CompletionCreateParams {
| Array<
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
| OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
>;
/**
@@ -1969,25 +2128,68 @@ export namespace CompletionCreateParams {
}
export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+ /**
+ * Must be "image_url" to identify this as image content
+ */
type: 'image_url';
}
export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
url: string;
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
detail?: string;
}
}
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
}
/**
@@ -1999,12 +2201,7 @@ export namespace CompletionCreateParams {
* they are concatenated. The underlying Llama Stack code may also add other system
* messages (for example, for formatting tool definitions).
*/
- content:
- | string
- | Array<
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
/**
* Must be "system" to identify this as a system message
@@ -2018,25 +2215,20 @@ export namespace CompletionCreateParams {
}
export namespace OpenAISystemMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -2052,12 +2244,7 @@ export namespace CompletionCreateParams {
/**
* The content of the model's response
*/
- content?:
- | string
- | Array<
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
/**
* (Optional) The name of the assistant message participant.
@@ -2071,40 +2258,59 @@ export namespace CompletionCreateParams {
}
export namespace OpenAIAssistantMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
-
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
type: 'function';
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Function call details
+ */
function?: ToolCall.Function;
+ /**
+ * (Optional) Index of the tool call in the list
+ */
index?: number;
}
export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
arguments?: string;
+ /**
+ * (Optional) Name of the function to call
+ */
name?: string;
}
}
@@ -2118,12 +2324,7 @@ export namespace CompletionCreateParams {
/**
* The response content from the tool
*/
- content:
- | string
- | Array<
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
/**
* Must be "tool" to identify this as a tool response
@@ -2137,25 +2338,20 @@ export namespace CompletionCreateParams {
}
export namespace OpenAIToolMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
/**
@@ -2165,12 +2361,7 @@ export namespace CompletionCreateParams {
/**
* The content of the developer message
*/
- content:
- | string
- | Array<
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
- | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
- >;
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
/**
* Must be "developer" to identify this as a developer message
@@ -2184,50 +2375,81 @@ export namespace CompletionCreateParams {
}
export namespace OpenAIDeveloperMessageParam {
- export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
text: string;
+ /**
+ * Must be "text" to identify this as text content
+ */
type: 'text';
}
-
- export interface OpenAIChatCompletionContentPartImageParam {
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- export interface ImageURL {
- url: string;
-
- detail?: string;
- }
- }
}
+ /**
+ * Text response format for OpenAI-compatible chat completion requests.
+ */
export interface OpenAIResponseFormatText {
+ /**
+ * Must be "text" to indicate plain text response format
+ */
type: 'text';
}
+ /**
+ * JSON schema response format for OpenAI-compatible chat completion requests.
+ */
export interface OpenAIResponseFormatJsonSchema {
+ /**
+ * The JSON schema specification for the response
+ */
json_schema: OpenAIResponseFormatJsonSchema.JsonSchema;
+ /**
+ * Must be "json_schema" to indicate structured JSON response format
+ */
type: 'json_schema';
}
export namespace OpenAIResponseFormatJsonSchema {
+ /**
+ * The JSON schema specification for the response
+ */
export interface JsonSchema {
+ /**
+ * Name of the schema
+ */
name: string;
+ /**
+ * (Optional) Description of the schema
+ */
description?: string;
- schema?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) The JSON schema definition
+ */
+ schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * (Optional) Whether to enforce strict adherence to the schema
+ */
strict?: boolean;
}
}
+ /**
+ * JSON object response format for OpenAI-compatible chat completion requests.
+ */
export interface OpenAIResponseFormatJsonObject {
+ /**
+ * Must be "json_object" to indicate generic JSON object response format
+ */
type: 'json_object';
}
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 06e6060..0ade7ab 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -179,7 +179,7 @@ export interface CompletionCreateParamsBase {
/**
* (Optional) The logit bias to use.
*/
- logit_bias?: Record<string, number>;
+ logit_bias?: { [key: string]: number };
/**
* (Optional) The log probabilities to use.
@@ -221,7 +221,7 @@ export interface CompletionCreateParamsBase {
/**
* (Optional) The stream options to use.
*/
- stream_options?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ stream_options?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* (Optional) The suffix that should be appended to the completion.
diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts
index 631562e..5ed6661 100644
--- a/src/resources/datasets.ts
+++ b/src/resources/datasets.ts
@@ -21,6 +21,21 @@ export class Datasets extends APIResource {
)._thenUnwrap((obj) => obj.data);
}
+ /**
+ * Append rows to a dataset.
+ */
+ appendrows(
+ datasetId: string,
+ body: DatasetAppendrowsParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<void> {
+ return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, {
+ body,
+ ...options,
+ headers: { Accept: '*/*', ...options?.headers },
+ });
+ }
+
/**
* Get a paginated list of rows from a dataset. Uses offset-based pagination where:
*
@@ -70,27 +85,42 @@ export class Datasets extends APIResource {
}
}
+/**
+ * Response from listing datasets.
+ */
export interface ListDatasetsResponse {
+ /**
+ * List of datasets
+ */
data: DatasetListResponse;
}
+/**
+ * Dataset resource for storing and accessing training or evaluation data.
+ */
export interface DatasetRetrieveResponse {
identifier: string;
- metadata: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * Additional metadata for the dataset
+ */
+ metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
provider_id: string;
/**
- * Purpose of the dataset. Each purpose has a required input data schema.
+ * Purpose of the dataset indicating its intended use
*/
purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
/**
- * A dataset that can be obtained from a URI.
+ * Data source configuration for the dataset
*/
source: DatasetRetrieveResponse.UriDataSource | DatasetRetrieveResponse.RowsDataSource;
+ /**
+ * Type of resource, always 'dataset' for datasets
+ */
type: 'dataset';
provider_resource_id?: string;
@@ -120,32 +150,44 @@ export namespace DatasetRetrieveResponse {
* "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
* ]
*/
- rows: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
type: 'rows';
}
}
+/**
+ * List of datasets
+ */
export type DatasetListResponse = Array<DatasetListResponse.DatasetListResponseItem>;
export namespace DatasetListResponse {
+ /**
+ * Dataset resource for storing and accessing training or evaluation data.
+ */
export interface DatasetListResponseItem {
identifier: string;
- metadata: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * Additional metadata for the dataset
+ */
+ metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
provider_id: string;
/**
- * Purpose of the dataset. Each purpose has a required input data schema.
+ * Purpose of the dataset indicating its intended use
*/
purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
/**
- * A dataset that can be obtained from a URI.
+ * Data source configuration for the dataset
*/
source: DatasetListResponseItem.UriDataSource | DatasetListResponseItem.RowsDataSource;
+ /**
+ * Type of resource, always 'dataset' for datasets
+ */
type: 'dataset';
provider_resource_id?: string;
@@ -175,7 +217,7 @@ export namespace DatasetListResponse {
* "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
* ]
*/
- rows: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
type: 'rows';
}
@@ -189,7 +231,7 @@ export interface DatasetIterrowsResponse {
/**
* The list of items for the current page
*/
- data: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
/**
* Whether there are more items available after this set
@@ -202,23 +244,32 @@ export interface DatasetIterrowsResponse {
url?: string;
}
+/**
+ * Dataset resource for storing and accessing training or evaluation data.
+ */
export interface DatasetRegisterResponse {
identifier: string;
- metadata: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * Additional metadata for the dataset
+ */
+ metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
provider_id: string;
/**
- * Purpose of the dataset. Each purpose has a required input data schema.
+ * Purpose of the dataset indicating its intended use
*/
purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
/**
- * A dataset that can be obtained from a URI.
+ * Data source configuration for the dataset
*/
source: DatasetRegisterResponse.UriDataSource | DatasetRegisterResponse.RowsDataSource;
+ /**
+ * Type of resource, always 'dataset' for datasets
+ */
type: 'dataset';
provider_resource_id?: string;
@@ -248,12 +299,19 @@ export namespace DatasetRegisterResponse {
* "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
* ]
*/
- rows: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
type: 'rows';
}
}
+export interface DatasetAppendrowsParams {
+ /**
+ * The rows to append to the dataset.
+ */
+ rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
+}
+
export interface DatasetIterrowsParams {
/**
* The number of rows to get.
@@ -302,7 +360,7 @@ export interface DatasetRegisterParams {
/**
* The metadata for the dataset. - E.g. {"description": "My dataset"}.
*/
- metadata?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
}
export namespace DatasetRegisterParams {
@@ -329,7 +387,7 @@ export namespace DatasetRegisterParams {
* "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
* ]
*/
- rows: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
type: 'rows';
}
@@ -342,6 +400,7 @@ export declare namespace Datasets {
type DatasetListResponse as DatasetListResponse,
type DatasetIterrowsResponse as DatasetIterrowsResponse,
type DatasetRegisterResponse as DatasetRegisterResponse,
+ type DatasetAppendrowsParams as DatasetAppendrowsParams,
type DatasetIterrowsParams as DatasetIterrowsParams,
type DatasetRegisterParams as DatasetRegisterParams,
};
diff --git a/src/resources/eval/eval.ts b/src/resources/eval/eval.ts
index a4cf4f6..961b24e 100644
--- a/src/resources/eval/eval.ts
+++ b/src/resources/eval/eval.ts
@@ -64,7 +64,7 @@ export interface BenchmarkConfig {
* Map between scoring function id and parameters for each scoring function you
* want to run
*/
- scoring_params: Record<string, ScoringFunctionsAPI.ScoringFnParams>;
+ scoring_params: { [key: string]: ScoringFunctionsAPI.ScoringFnParams };
/**
* (Optional) The number of examples to evaluate. If not provided, all examples in
@@ -121,17 +121,26 @@ export interface EvaluateResponse {
/**
* The generations from the evaluation.
*/
- generations: Array | unknown | null>>;
+ generations: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
/**
* The scores from the evaluation.
*/
- scores: Record;
+ scores: { [key: string]: Shared.ScoringResult };
}
+/**
+ * A job execution instance with status tracking.
+ */
export interface Job {
+ /**
+ * Unique identifier for the job
+ */
job_id: string;
+ /**
+ * Current execution status of the job
+ */
status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled';
}
@@ -144,7 +153,7 @@ export interface EvalEvaluateRowsParams {
/**
* The rows to evaluate.
*/
- input_rows: Array | unknown | null>>;
+ input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
/**
* The scoring functions to use for the evaluation.
@@ -161,7 +170,7 @@ export interface EvalEvaluateRowsAlphaParams {
/**
* The rows to evaluate.
*/
- input_rows: Array | unknown | null>>;
+ input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
/**
* The scoring functions to use for the evaluation.
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 3141c6c..7671fc4 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -122,10 +122,19 @@ export interface ListFilesResponse {
*/
data: Array;
+ /**
+ * ID of the first file in the list for pagination
+ */
first_id: string;
+ /**
+ * Whether there are more files available beyond this page
+ */
has_more: boolean;
+ /**
+ * ID of the last file in the list for pagination
+ */
last_id: string;
/**
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 376eab8..ceba761 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -9,7 +9,10 @@ export {
type ToolExecutionStep,
type ToolResponse,
type AgentCreateResponse,
+ type AgentRetrieveResponse,
+ type AgentListResponse,
type AgentCreateParams,
+ type AgentListParams,
} from './agents/agents';
export {
Benchmarks,
@@ -33,6 +36,7 @@ export {
type DatasetListResponse,
type DatasetIterrowsResponse,
type DatasetRegisterResponse,
+ type DatasetAppendrowsParams,
type DatasetIterrowsParams,
type DatasetRegisterParams,
} from './datasets';
@@ -82,6 +86,7 @@ export {
type ModelListResponse,
type ModelRegisterParams,
} from './models';
+export { Moderations, type CreateResponse, type ModerationCreateParams } from './moderations';
export {
PostTraining,
type AlgorithmConfig,
diff --git a/src/resources/inference.ts b/src/resources/inference.ts
index 25177a7..ae1088b 100644
--- a/src/resources/inference.ts
+++ b/src/resources/inference.ts
@@ -30,6 +30,8 @@ export class Inference extends APIResource {
/**
* Generate a chat completion for the given messages using the specified model.
+ *
+ * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions.
*/
chatCompletion(
body: InferenceChatCompletionParamsNonStreaming,
@@ -56,6 +58,8 @@ export class Inference extends APIResource {
/**
* Generate a completion for the given content using the specified model.
+ *
+ * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.
*/
completion(
body: InferenceCompletionParamsNonStreaming,
@@ -82,6 +86,8 @@ export class Inference extends APIResource {
/**
* Generate embeddings for content pieces using the specified model.
+ *
+ * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.
*/
embeddings(
body: InferenceEmbeddingsParams,
@@ -100,6 +106,9 @@ export interface ChatCompletionResponseStreamChunk {
*/
event: ChatCompletionResponseStreamChunk.Event;
+ /**
+ * (Optional) List of metrics associated with the API response
+ */
metrics?: Array;
}
@@ -130,11 +139,23 @@ export namespace ChatCompletionResponseStreamChunk {
stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
}
+ /**
+ * A metric value included in API responses.
+ */
export interface Metric {
+ /**
+ * The name of the metric
+ */
metric: string;
+ /**
+ * The numeric value of the metric
+ */
value: number;
+ /**
+ * (Optional) The unit of measurement for the metric value
+ */
unit?: string;
}
}
@@ -158,15 +179,30 @@ export interface CompletionResponse {
*/
logprobs?: Array;
+ /**
+ * (Optional) List of metrics associated with the API response
+ */
metrics?: Array;
}
export namespace CompletionResponse {
+ /**
+ * A metric value included in API responses.
+ */
export interface Metric {
+ /**
+ * The name of the metric
+ */
metric: string;
+ /**
+ * The numeric value of the metric
+ */
value: number;
+ /**
+ * (Optional) The unit of measurement for the metric value
+ */
unit?: string;
}
}
@@ -190,10 +226,16 @@ export interface TokenLogProbs {
/**
* Dictionary mapping tokens to their log probabilities
*/
- logprobs_by_token: Record;
+ logprobs_by_token: { [key: string]: number };
}
+/**
+ * Response from a batch chat completion request.
+ */
export interface InferenceBatchChatCompletionResponse {
+ /**
+ * List of chat completion responses, one for each conversation in the batch
+ */
batch: Array;
}
@@ -284,7 +326,7 @@ export namespace InferenceBatchChatCompletionParams {
description?: string;
- parameters?: Record;
+ parameters?: { [key: string]: Shared.ToolParamDefinition };
}
}
@@ -447,7 +489,7 @@ export namespace InferenceChatCompletionParams {
description?: string;
- parameters?: Record;
+ parameters?: { [key: string]: Shared.ToolParamDefinition };
}
export type InferenceChatCompletionParamsNonStreaming =
diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts
index c54b5c8..4e5d87c 100644
--- a/src/resources/inspect.ts
+++ b/src/resources/inspect.ts
@@ -5,7 +5,7 @@ import * as Core from '../core';
export class Inspect extends APIResource {
/**
- * Get the health of the service.
+ * Get the current health status of the service.
*/
health(options?: Core.RequestOptions): Core.APIPromise {
return this._client.get('/v1/health', options);
@@ -19,31 +19,75 @@ export class Inspect extends APIResource {
}
}
+/**
+ * Health status information for the service.
+ */
export interface HealthInfo {
+ /**
+ * Current health status of the service
+ */
status: 'OK' | 'Error' | 'Not Implemented';
}
+/**
+ * Information about a registered provider including its configuration and health
+ * status.
+ */
export interface ProviderInfo {
+ /**
+ * The API name this provider implements
+ */
api: string;
- config: Record | unknown | null>;
+ /**
+ * Configuration parameters for the provider
+ */
+ config: { [key: string]: boolean | number | string | Array | unknown | null };
- health: Record | unknown | null>;
+ /**
+ * Current health status of the provider
+ */
+ health: { [key: string]: boolean | number | string | Array | unknown | null };
+ /**
+ * Unique identifier for the provider
+ */
provider_id: string;
+ /**
+ * The type of provider implementation
+ */
provider_type: string;
}
+/**
+ * Information about an API route including its path, method, and implementing
+ * providers.
+ */
export interface RouteInfo {
+ /**
+ * HTTP method for the route
+ */
method: string;
+ /**
+ * List of provider types that implement this route
+ */
provider_types: Array;
+ /**
+ * The API endpoint path
+ */
route: string;
}
+/**
+ * Version information for the service.
+ */
export interface VersionInfo {
+ /**
+ * Version number of the service
+ */
version: string;
}
diff --git a/src/resources/models.ts b/src/resources/models.ts
index 50ac191..555a7a7 100644
--- a/src/resources/models.ts
+++ b/src/resources/models.ts
@@ -42,17 +42,38 @@ export interface ListModelsResponse {
data: ModelListResponse;
}
+/**
+ * A model resource representing an AI model registered in Llama Stack.
+ */
export interface Model {
+ /**
+ * Unique identifier for this resource in llama stack
+ */
identifier: string;
- metadata: Record | unknown | null>;
+ /**
+ * Any additional metadata for this model
+ */
+ metadata: { [key: string]: boolean | number | string | Array | unknown | null };
+ /**
+ * The type of model (LLM or embedding model)
+ */
model_type: 'llm' | 'embedding';
+ /**
+ * ID of the provider that owns this resource
+ */
provider_id: string;
+ /**
+ * The resource type, always 'model' for model resources
+ */
type: 'model';
+ /**
+ * Unique identifier for this resource in the provider
+ */
provider_resource_id?: string;
}
@@ -67,7 +88,7 @@ export interface ModelRegisterParams {
/**
* Any additional metadata for this model.
*/
- metadata?: Record | unknown | null>;
+ metadata?: { [key: string]: boolean | number | string | Array | unknown | null };
/**
* The type of model to register.
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
new file mode 100644
index 0000000..aee9b57
--- /dev/null
+++ b/src/resources/moderations.ts
@@ -0,0 +1,85 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../resource';
+import * as Core from '../core';
+
+export class Moderations extends APIResource {
+ /**
+ * Classifies if text and/or image inputs are potentially harmful.
+ */
+ create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.post('/v1/openai/v1/moderations', { body, ...options });
+ }
+}
+
+/**
+ * A moderation object.
+ */
+export interface CreateResponse {
+ /**
+ * The unique identifier for the moderation request.
+ */
+ id: string;
+
+ /**
+ * The model used to generate the moderation results.
+ */
+ model: string;
+
+ /**
+ * A list of moderation objects
+ */
+ results: Array;
+}
+
+export namespace CreateResponse {
+ /**
+ * A moderation object.
+ */
+ export interface Result {
+ /**
+ * Whether any of the below categories are flagged.
+ */
+ flagged: boolean;
+
+ metadata: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * A list of the categories, and whether they are flagged or not.
+ */
+ categories?: { [key: string]: boolean };
+
+ /**
+ * A list of the categories along with the input type(s) that the score applies to.
+ */
+ category_applied_input_types?: { [key: string]: Array };
+
+ /**
+ * A list of the categories along with their scores as predicted by model. Required
+ * set of categories that need to be in response - violence - violence/graphic -
+ * harassment - harassment/threatening - hate - hate/threatening - illicit -
+ * illicit/violent - sexual - sexual/minors - self-harm - self-harm/intent -
+ * self-harm/instructions
+ */
+ category_scores?: { [key: string]: number };
+
+ user_message?: string;
+ }
+}
+
+export interface ModerationCreateParams {
+ /**
+ * Input (or inputs) to classify. Can be a single string, an array of strings, or
+ * an array of multi-modal input objects similar to other models.
+ */
+ input: string | Array;
+
+ /**
+ * The content moderation model you would like to use.
+ */
+ model: string;
+}
+
+export declare namespace Moderations {
+ export { type CreateResponse as CreateResponse, type ModerationCreateParams as ModerationCreateParams };
+}
diff --git a/src/resources/post-training/job.ts b/src/resources/post-training/job.ts
index b7f9ec6..a250ac9 100644
--- a/src/resources/post-training/job.ts
+++ b/src/resources/post-training/job.ts
@@ -2,7 +2,6 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
-import { ListPostTrainingJobsResponse } from './post-training';
import * as PostTrainingAPI from './post-training';
export class Job extends APIResource {
@@ -57,30 +56,185 @@ export namespace JobListResponse {
* Artifacts of a finetuning job.
*/
export interface JobArtifactsResponse {
- checkpoints: Array;
+ /**
+ * List of model checkpoints created during training
+ */
+ checkpoints: Array;
+ /**
+ * Unique identifier for the training job
+ */
job_uuid: string;
}
+export namespace JobArtifactsResponse {
+ /**
+ * Checkpoint created during training runs.
+ */
+ export interface Checkpoint {
+ /**
+ * Timestamp when the checkpoint was created
+ */
+ created_at: string;
+
+ /**
+ * Training epoch when the checkpoint was saved
+ */
+ epoch: number;
+
+ /**
+ * Unique identifier for the checkpoint
+ */
+ identifier: string;
+
+ /**
+ * File system path where the checkpoint is stored
+ */
+ path: string;
+
+ /**
+ * Identifier of the training job that created this checkpoint
+ */
+ post_training_job_id: string;
+
+ /**
+ * (Optional) Training metrics associated with this checkpoint
+ */
+ training_metrics?: Checkpoint.TrainingMetrics;
+ }
+
+ export namespace Checkpoint {
+ /**
+ * (Optional) Training metrics associated with this checkpoint
+ */
+ export interface TrainingMetrics {
+ /**
+ * Training epoch number
+ */
+ epoch: number;
+
+ /**
+ * Perplexity metric indicating model confidence
+ */
+ perplexity: number;
+
+ /**
+ * Loss value on the training dataset
+ */
+ train_loss: number;
+
+ /**
+ * Loss value on the validation dataset
+ */
+ validation_loss: number;
+ }
+ }
+}
+
/**
* Status of a finetuning job.
*/
export interface JobStatusResponse {
- checkpoints: Array;
+ /**
+ * List of model checkpoints created during training
+ */
+ checkpoints: Array;
+ /**
+ * Unique identifier for the training job
+ */
job_uuid: string;
+ /**
+ * Current status of the training job
+ */
status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled';
+ /**
+ * (Optional) Timestamp when the job finished, if completed
+ */
completed_at?: string;
- resources_allocated?: Record | unknown | null>;
+ /**
+ * (Optional) Information about computational resources allocated to the job
+ */
+ resources_allocated?: { [key: string]: boolean | number | string | Array | unknown | null };
+ /**
+ * (Optional) Timestamp when the job was scheduled
+ */
scheduled_at?: string;
+ /**
+ * (Optional) Timestamp when the job execution began
+ */
started_at?: string;
}
+export namespace JobStatusResponse {
+ /**
+ * Checkpoint created during training runs.
+ */
+ export interface Checkpoint {
+ /**
+ * Timestamp when the checkpoint was created
+ */
+ created_at: string;
+
+ /**
+ * Training epoch when the checkpoint was saved
+ */
+ epoch: number;
+
+ /**
+ * Unique identifier for the checkpoint
+ */
+ identifier: string;
+
+ /**
+ * File system path where the checkpoint is stored
+ */
+ path: string;
+
+ /**
+ * Identifier of the training job that created this checkpoint
+ */
+ post_training_job_id: string;
+
+ /**
+ * (Optional) Training metrics associated with this checkpoint
+ */
+ training_metrics?: Checkpoint.TrainingMetrics;
+ }
+
+ export namespace Checkpoint {
+ /**
+ * (Optional) Training metrics associated with this checkpoint
+ */
+ export interface TrainingMetrics {
+ /**
+ * Training epoch number
+ */
+ epoch: number;
+
+ /**
+ * Perplexity metric indicating model confidence
+ */
+ perplexity: number;
+
+ /**
+ * Loss value on the training dataset
+ */
+ train_loss: number;
+
+ /**
+ * Loss value on the validation dataset
+ */
+ validation_loss: number;
+ }
+ }
+}
+
export interface JobArtifactsParams {
/**
* The UUID of the job to get the artifacts of.
diff --git a/src/resources/post-training/post-training.ts b/src/resources/post-training/post-training.ts
index dde6cd5..8f6eb3f 100644
--- a/src/resources/post-training/post-training.ts
+++ b/src/resources/post-training/post-training.ts
@@ -37,32 +37,74 @@ export class PostTraining extends APIResource {
}
}
+/**
+ * Configuration for Low-Rank Adaptation (LoRA) fine-tuning.
+ */
export type AlgorithmConfig = AlgorithmConfig.LoraFinetuningConfig | AlgorithmConfig.QatFinetuningConfig;
export namespace AlgorithmConfig {
+ /**
+ * Configuration for Low-Rank Adaptation (LoRA) fine-tuning.
+ */
export interface LoraFinetuningConfig {
+ /**
+ * LoRA scaling parameter that controls adaptation strength
+ */
alpha: number;
+ /**
+ * Whether to apply LoRA to MLP layers
+ */
apply_lora_to_mlp: boolean;
+ /**
+ * Whether to apply LoRA to output projection layers
+ */
apply_lora_to_output: boolean;
+ /**
+ * List of attention module names to apply LoRA to
+ */
lora_attn_modules: Array;
+ /**
+ * Rank of the LoRA adaptation (lower rank = fewer parameters)
+ */
rank: number;
+ /**
+ * Algorithm type identifier, always "LoRA"
+ */
type: 'LoRA';
+ /**
+ * (Optional) Whether to quantize the base model weights
+ */
quantize_base?: boolean;
+ /**
+ * (Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation)
+ */
use_dora?: boolean;
}
+ /**
+ * Configuration for Quantization-Aware Training (QAT) fine-tuning.
+ */
export interface QatFinetuningConfig {
+ /**
+ * Size of groups for grouped quantization
+ */
group_size: number;
+ /**
+ * Name of the quantization algorithm to use
+ */
quantizer_name: string;
+ /**
+ * Algorithm type identifier, always "QAT"
+ */
type: 'QAT';
}
}
@@ -95,7 +137,7 @@ export interface PostTrainingPreferenceOptimizeParams {
/**
* The hyperparam search configuration.
*/
- hyperparam_search_config: Record | unknown | null>;
+ hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null };
/**
* The UUID of the job to create.
@@ -105,7 +147,7 @@ export interface PostTrainingPreferenceOptimizeParams {
/**
* The logger configuration.
*/
- logger_config: Record | unknown | null>;
+ logger_config: { [key: string]: boolean | number | string | Array | unknown | null };
/**
* The training configuration.
@@ -118,70 +160,151 @@ export namespace PostTrainingPreferenceOptimizeParams {
* The algorithm configuration.
*/
export interface AlgorithmConfig {
- epsilon: number;
-
- gamma: number;
-
- reward_clip: number;
-
- reward_scale: number;
+ /**
+ * Temperature parameter for the DPO loss
+ */
+ beta: number;
+
+ /**
+ * The type of loss function to use for DPO
+ */
+ loss_type: 'sigmoid' | 'hinge' | 'ipo' | 'kto_pair';
}
/**
* The training configuration.
*/
export interface TrainingConfig {
+ /**
+ * Number of steps to accumulate gradients before updating
+ */
gradient_accumulation_steps: number;
+ /**
+ * Maximum number of steps to run per epoch
+ */
max_steps_per_epoch: number;
+ /**
+ * Number of training epochs to run
+ */
n_epochs: number;
+ /**
+ * (Optional) Configuration for data loading and formatting
+ */
data_config?: TrainingConfig.DataConfig;
+ /**
+ * (Optional) Data type for model parameters (bf16, fp16, fp32)
+ */
dtype?: string;
+ /**
+ * (Optional) Configuration for memory and compute optimizations
+ */
efficiency_config?: TrainingConfig.EfficiencyConfig;
+ /**
+ * (Optional) Maximum number of validation steps per epoch
+ */
max_validation_steps?: number;
+ /**
+ * (Optional) Configuration for the optimization algorithm
+ */
optimizer_config?: TrainingConfig.OptimizerConfig;
}
export namespace TrainingConfig {
+ /**
+ * (Optional) Configuration for data loading and formatting
+ */
export interface DataConfig {
+ /**
+ * Number of samples per training batch
+ */
batch_size: number;
+ /**
+ * Format of the dataset (instruct or dialog)
+ */
data_format: 'instruct' | 'dialog';
+ /**
+ * Unique identifier for the training dataset
+ */
dataset_id: string;
+ /**
+ * Whether to shuffle the dataset during training
+ */
shuffle: boolean;
+ /**
+ * (Optional) Whether to pack multiple samples into a single sequence for
+ * efficiency
+ */
packed?: boolean;
+ /**
+ * (Optional) Whether to compute loss on input tokens as well as output tokens
+ */
train_on_input?: boolean;
+ /**
+ * (Optional) Unique identifier for the validation dataset
+ */
validation_dataset_id?: string;
}
+ /**
+ * (Optional) Configuration for memory and compute optimizations
+ */
export interface EfficiencyConfig {
+ /**
+ * (Optional) Whether to use activation checkpointing to reduce memory usage
+ */
enable_activation_checkpointing?: boolean;
+ /**
+ * (Optional) Whether to offload activations to CPU to save GPU memory
+ */
enable_activation_offloading?: boolean;
+ /**
+ * (Optional) Whether to offload FSDP parameters to CPU
+ */
fsdp_cpu_offload?: boolean;
+ /**
+ * (Optional) Whether to use memory-efficient FSDP wrapping
+ */
memory_efficient_fsdp_wrap?: boolean;
}
+ /**
+ * (Optional) Configuration for the optimization algorithm
+ */
export interface OptimizerConfig {
+ /**
+ * Learning rate for the optimizer
+ */
lr: number;
+ /**
+ * Number of steps for learning rate warmup
+ */
num_warmup_steps: number;
+ /**
+ * Type of optimizer to use (adam, adamw, or sgd)
+ */
optimizer_type: 'adam' | 'adamw' | 'sgd';
+ /**
+ * Weight decay coefficient for regularization
+ */
weight_decay: number;
}
}
@@ -191,7 +314,7 @@ export interface PostTrainingSupervisedFineTuneParams {
/**
* The hyperparam search configuration.
*/
- hyperparam_search_config: Record | unknown | null>;
+ hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null };
/**
* The UUID of the job to create.
@@ -201,7 +324,7 @@ export interface PostTrainingSupervisedFineTuneParams {
/**
* The logger configuration.
*/
- logger_config: Record | unknown | null>;
+ logger_config: { [key: string]: boolean | number | string | Array | unknown | null };
/**
* The training configuration.
@@ -229,57 +352,136 @@ export namespace PostTrainingSupervisedFineTuneParams {
* The training configuration.
*/
export interface TrainingConfig {
+ /**
+ * Number of steps to accumulate gradients before updating
+ */
gradient_accumulation_steps: number;
+ /**
+ * Maximum number of steps to run per epoch
+ */
max_steps_per_epoch: number;
+ /**
+ * Number of training epochs to run
+ */
n_epochs: number;
+ /**
+ * (Optional) Configuration for data loading and formatting
+ */
data_config?: TrainingConfig.DataConfig;
+ /**
+ * (Optional) Data type for model parameters (bf16, fp16, fp32)
+ */
dtype?: string;
+ /**
+ * (Optional) Configuration for memory and compute optimizations
+ */
efficiency_config?: TrainingConfig.EfficiencyConfig;
+ /**
+ * (Optional) Maximum number of validation steps per epoch
+ */
max_validation_steps?: number;
+ /**
+ * (Optional) Configuration for the optimization algorithm
+ */
optimizer_config?: TrainingConfig.OptimizerConfig;
}
export namespace TrainingConfig {
+ /**
+ * (Optional) Configuration for data loading and formatting
+ */
export interface DataConfig {
+ /**
+ * Number of samples per training batch
+ */
batch_size: number;
+ /**
+ * Format of the dataset (instruct or dialog)
+ */
data_format: 'instruct' | 'dialog';
+ /**
+ * Unique identifier for the training dataset
+ */
dataset_id: string;
+ /**
+ * Whether to shuffle the dataset during training
+ */
shuffle: boolean;
+ /**
+ * (Optional) Whether to pack multiple samples into a single sequence for
+ * efficiency
+ */
packed?: boolean;
+ /**
+ * (Optional) Whether to compute loss on input tokens as well as output tokens
+ */
train_on_input?: boolean;
+ /**
+ * (Optional) Unique identifier for the validation dataset
+ */
validation_dataset_id?: string;
}
+ /**
+ * (Optional) Configuration for memory and compute optimizations
+ */
export interface EfficiencyConfig {
+ /**
+ * (Optional) Whether to use activation checkpointing to reduce memory usage
+ */
enable_activation_checkpointing?: boolean;
+ /**
+ * (Optional) Whether to offload activations to CPU to save GPU memory
+ */
enable_activation_offloading?: boolean;
+ /**
+ * (Optional) Whether to offload FSDP parameters to CPU
+ */
fsdp_cpu_offload?: boolean;
+ /**
+ * (Optional) Whether to use memory-efficient FSDP wrapping
+ */
memory_efficient_fsdp_wrap?: boolean;
}
+ /**
+ * (Optional) Configuration for the optimization algorithm
+ */
export interface OptimizerConfig {
+ /**
+ * Learning rate for the optimizer
+ */
lr: number;
+ /**
+ * Number of steps for learning rate warmup
+ */
num_warmup_steps: number;
+ /**
+ * Type of optimizer to use (adam, adamw, or sgd)
+ */
optimizer_type: 'adam' | 'adamw' | 'sgd';
+ /**
+ * Weight decay coefficient for regularization
+ */
weight_decay: number;
}
}
diff --git a/src/resources/providers.ts b/src/resources/providers.ts
index dd37a42..d27b9ab 100644
--- a/src/resources/providers.ts
+++ b/src/resources/providers.ts
@@ -22,10 +22,19 @@ export class Providers extends APIResource {
}
}
+/**
+ * Response containing a list of all available providers.
+ */
export interface ListProvidersResponse {
+ /**
+ * List of provider information objects
+ */
data: ProviderListResponse;
}
+/**
+ * List of provider information objects
+ */
export type ProviderListResponse = Array;
export declare namespace Providers {
diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts
index c58085f..ff21948 100644
--- a/src/resources/responses/input-items.ts
+++ b/src/resources/responses/input-items.ts
@@ -26,7 +26,13 @@ export class InputItems extends APIResource {
}
}
+/**
+ * List container for OpenAI response input items.
+ */
export interface InputItemListResponse {
+ /**
+ * List of input items
+ */
data: Array<
| InputItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall
| InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
@@ -35,41 +41,95 @@ export interface InputItemListResponse {
| InputItemListResponse.OpenAIResponseMessage
>;
+ /**
+ * Object type identifier, always "list"
+ */
object: 'list';
}
export namespace InputItemListResponse {
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
queries: Array;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
@@ -113,25 +173,132 @@ export namespace InputItemListResponse {
}
export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
text: string;
+ /**
+ * Content type identifier, always "input_text"
+ */
type: 'input_text';
}
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
detail: 'low' | 'high' | 'auto';
+ /**
+ * Content type identifier, always "input_image"
+ */
type: 'input_image';
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
text: string;
type: 'output_text';
}
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
}
}
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 116fa11..f0d4d20 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -58,15 +58,33 @@ export class Responses extends APIResource {
}
}
+/**
+ * Complete OpenAI response object containing generation results and metadata.
+ */
export interface ResponseObject {
+ /**
+ * Unique identifier for this response
+ */
id: string;
+ /**
+ * Unix timestamp when the response was created
+ */
created_at: number;
+ /**
+ * Model identifier used for generation
+ */
model: string;
+ /**
+ * Object type identifier, always "response"
+ */
object: 'response';
+ /**
+ * List of generated output items (messages, tool calls, etc.)
+ */
output: Array<
| ResponseObject.OpenAIResponseMessage
| ResponseObject.OpenAIResponseOutputMessageWebSearchToolCall
@@ -76,22 +94,49 @@ export interface ResponseObject {
| ResponseObject.OpenAIResponseOutputMessageMcpListTools
>;
+ /**
+ * Whether tool calls can be executed in parallel
+ */
parallel_tool_calls: boolean;
+ /**
+ * Current status of the response generation
+ */
status: string;
+ /**
+ * Text formatting configuration for the response
+ */
text: ResponseObject.Text;
+ /**
+ * (Optional) Error details if the response generation failed
+ */
error?: ResponseObject.Error;
+ /**
+ * (Optional) ID of the previous response in a conversation
+ */
previous_response_id?: string;
+ /**
+ * (Optional) Sampling temperature used for generation
+ */
temperature?: number;
+ /**
+ * (Optional) Nucleus sampling parameter used for generation
+ */
top_p?: number;
+ /**
+ * (Optional) Truncation strategy applied to the response
+ */
truncation?: string;
+ /**
+ * (Optional) User identifier associated with the request
+ */
user?: string;
}
@@ -120,107 +165,319 @@ export namespace ResponseObject {
}
export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
text: string;
+ /**
+ * Content type identifier, always "input_text"
+ */
type: 'input_text';
}
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
detail: 'low' | 'high' | 'auto';
+ /**
+ * Content type identifier, always "input_image"
+ */
type: 'input_image';
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
text: string;
type: 'output_text';
}
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
}
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
queries: Array<string>;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
id: string;
+ /**
+ * JSON string containing the MCP call arguments
+ */
arguments: string;
+ /**
+ * Name of the MCP method being called
+ */
name: string;
+ /**
+ * Label identifying the MCP server handling the call
+ */
server_label: string;
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
type: 'mcp_call';
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
error?: string;
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
output?: string;
}
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
id: string;
+ /**
+ * Label identifying the MCP server providing the tools
+ */
server_label: string;
+ /**
+ * List of available tools provided by the MCP server
+ */
tools: Array<Tool>;
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
type: 'mcp_list_tools';
}
export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
export interface Tool {
- input_schema: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * Name of the tool
+ */
name: string;
+ /**
+ * (Optional) Description of what the tool does
+ */
description?: string;
}
}
+ /**
+ * Text formatting configuration for the response
+ */
export interface Text {
/**
- * Configuration for Responses API text format.
+ * (Optional) Text format configuration specifying output format requirements
*/
format?: Text.Format;
}
export namespace Text {
/**
- * Configuration for Responses API text format.
+ * (Optional) Text format configuration specifying output format requirements
*/
export interface Format {
/**
@@ -242,7 +499,7 @@ export namespace ResponseObject {
* The JSON schema the response should conform to. In a Python SDK, this is often a
* `pydantic` model. Only used for json_schema.
*/
- schema?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* (Optional) Whether to strictly enforce the JSON schema. If true, the response
@@ -252,13 +509,25 @@ export namespace ResponseObject {
}
}
+ /**
+ * (Optional) Error details if the response generation failed
+ */
export interface Error {
+ /**
+ * Error code identifying the type of failure
+ */
code: string;
+ /**
+ * Human-readable error message describing the failure
+ */
message: string;
}
}
+/**
+ * Streaming event indicating a new response has been created.
+ */
export type ResponseObjectStream =
| ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
@@ -281,17 +550,27 @@ export type ResponseObjectStream =
| ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted;
export namespace ResponseObjectStream {
+ /**
+ * Streaming event indicating a new response has been created.
+ */
export interface OpenAIResponseObjectStreamResponseCreated {
+ /**
+ * The newly created response object
+ */
response: ResponsesAPI.ResponseObject;
+ /**
+ * Event type identifier, always "response.created"
+ */
type: 'response.created';
}
+ /**
+ * Streaming event for when a new output item is added to the response.
+ */
export interface OpenAIResponseObjectStreamResponseOutputItemAdded {
/**
- * Corresponds to the various Message types in the Responses API. They are all
- * under one type because the Responses API gives them all the same "type" value,
- * and there is no way to tell them apart in certain scenarios.
+ * The output item that was added (message, tool call, etc.)
*/
item:
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMessage
@@ -301,12 +580,24 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools;
+ /**
+ * Index position of this item in the output list
+ */
output_index: number;
+ /**
+ * Unique identifier of the response containing this output
+ */
response_id: string;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.output_item.added"
+ */
type: 'response.output_item.added';
}
@@ -335,103 +626,313 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
text: string;
+ /**
+ * Content type identifier, always "input_text"
+ */
type: 'input_text';
}
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
detail: 'low' | 'high' | 'auto';
+ /**
+ * Content type identifier, always "input_image"
+ */
type: 'input_image';
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
text: string;
type: 'output_text';
}
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
}
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
queries: Array<string>;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageMcpCall {
- id: string;
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+ /**
+ * JSON string containing the MCP call arguments
+ */
arguments: string;
+ /**
+ * Name of the MCP method being called
+ */
name: string;
+ /**
+ * Label identifying the MCP server handling the call
+ */
server_label: string;
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
type: 'mcp_call';
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
error?: string;
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
output?: string;
}
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
id: string;
+ /**
+ * Label identifying the MCP server providing the tools
+ */
server_label: string;
+ /**
+ * List of available tools provided by the MCP server
+ */
tools: Array<Tool>;
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
type: 'mcp_list_tools';
}
export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
export interface Tool {
- input_schema: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * Name of the tool
+ */
name: string;
+ /**
+ * (Optional) Description of what the tool does
+ */
description?: string;
}
}
}
+ /**
+ * Streaming event for when an output item is completed.
+ */
export interface OpenAIResponseObjectStreamResponseOutputItemDone {
/**
- * Corresponds to the various Message types in the Responses API. They are all
- * under one type because the Responses API gives them all the same "type" value,
- * and there is no way to tell them apart in certain scenarios.
+ * The completed output item (message, tool call, etc.)
*/
item:
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMessage
@@ -441,12 +942,24 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools;
+ /**
+ * Index position of this item in the output list
+ */
output_index: number;
+ /**
+ * Unique identifier of the response containing this output
+ */
response_id: string;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.output_item.done"
+ */
type: 'response.output_item.done';
}
@@ -475,157 +988,459 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
text: string;
+ /**
+ * Content type identifier, always "input_text"
+ */
type: 'input_text';
}
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
detail: 'low' | 'high' | 'auto';
+ /**
+ * Content type identifier, always "input_image"
+ */
type: 'input_image';
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
text: string;
type: 'output_text';
}
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
}
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
queries: Array<string>;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
id: string;
+ /**
+ * JSON string containing the MCP call arguments
+ */
arguments: string;
+ /**
+ * Name of the MCP method being called
+ */
name: string;
+ /**
+ * Label identifying the MCP server handling the call
+ */
server_label: string;
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
type: 'mcp_call';
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
error?: string;
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
output?: string;
}
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
id: string;
+ /**
+ * Label identifying the MCP server providing the tools
+ */
server_label: string;
+ /**
+ * List of available tools provided by the MCP server
+ */
tools: Array<Tool>;
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
type: 'mcp_list_tools';
}
export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
export interface Tool {
- input_schema: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * Name of the tool
+ */
name: string;
+ /**
+ * (Optional) Description of what the tool does
+ */
description?: string;
}
}
}
+ /**
+ * Streaming event for incremental text content updates.
+ */
export interface OpenAIResponseObjectStreamResponseOutputTextDelta {
+ /**
+ * Index position within the text content
+ */
content_index: number;
+ /**
+ * Incremental text content being added
+ */
delta: string;
+ /**
+ * Unique identifier of the output item being updated
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.output_text.delta"
+ */
type: 'response.output_text.delta';
}
+ /**
+ * Streaming event for when text output is completed.
+ */
export interface OpenAIResponseObjectStreamResponseOutputTextDone {
+ /**
+ * Index position within the text content
+ */
content_index: number;
+ /**
+ * Unique identifier of the completed output item
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Final complete text content of the output item
+ */
text: string;
+ /**
+ * Event type identifier, always "response.output_text.done"
+ */
type: 'response.output_text.done';
}
+ /**
+ * Streaming event for incremental function call argument updates.
+ */
export interface OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta {
+ /**
+ * Incremental function call arguments being added
+ */
delta: string;
+ /**
+ * Unique identifier of the function call being updated
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.function_call_arguments.delta"
+ */
type: 'response.function_call_arguments.delta';
}
+ /**
+ * Streaming event for when function call arguments are completed.
+ */
export interface OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone {
+ /**
+ * Final complete arguments JSON string for the function call
+ */
arguments: string;
+ /**
+ * Unique identifier of the completed function call
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.function_call_arguments.done"
+ */
type: 'response.function_call_arguments.done';
}
+ /**
+ * Streaming event for web search calls in progress.
+ */
export interface OpenAIResponseObjectStreamResponseWebSearchCallInProgress {
+ /**
+ * Unique identifier of the web search call
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.web_search_call.in_progress"
+ */
type: 'response.web_search_call.in_progress';
}
@@ -639,13 +1454,28 @@ export namespace ResponseObjectStream {
type: 'response.web_search_call.searching';
}
+ /**
+ * Streaming event for completed web search calls.
+ */
export interface OpenAIResponseObjectStreamResponseWebSearchCallCompleted {
+ /**
+ * Unique identifier of the completed web search call
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.web_search_call.completed"
+ */
type: 'response.web_search_call.completed';
}
@@ -691,53 +1521,125 @@ export namespace ResponseObjectStream {
type: 'response.mcp_call.arguments.done';
}
+ /**
+ * Streaming event for MCP calls in progress.
+ */
export interface OpenAIResponseObjectStreamResponseMcpCallInProgress {
+ /**
+ * Unique identifier of the MCP call
+ */
item_id: string;
+ /**
+ * Index position of the item in the output list
+ */
output_index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.mcp_call.in_progress"
+ */
type: 'response.mcp_call.in_progress';
}
+ /**
+ * Streaming event for failed MCP calls.
+ */
export interface OpenAIResponseObjectStreamResponseMcpCallFailed {
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.mcp_call.failed"
+ */
type: 'response.mcp_call.failed';
}
+ /**
+ * Streaming event for completed MCP calls.
+ */
export interface OpenAIResponseObjectStreamResponseMcpCallCompleted {
+ /**
+ * Sequential number for ordering streaming events
+ */
sequence_number: number;
+ /**
+ * Event type identifier, always "response.mcp_call.completed"
+ */
type: 'response.mcp_call.completed';
}
+ /**
+ * Streaming event indicating a response has been completed.
+ */
export interface OpenAIResponseObjectStreamResponseCompleted {
+ /**
+ * The completed response object
+ */
response: ResponsesAPI.ResponseObject;
+ /**
+ * Event type identifier, always "response.completed"
+ */
type: 'response.completed';
}
}
+/**
+ * Paginated list of OpenAI response objects with navigation metadata.
+ */
export interface ResponseListResponse {
+ /**
+ * List of response objects with their input context
+ */
data: Array<Data>;
+ /**
+ * Identifier of the first item in this page
+ */
first_id: string;
+ /**
+ * Whether there are more results available beyond this page
+ */
has_more: boolean;
+ /**
+ * Identifier of the last item in this page
+ */
last_id: string;
+ /**
+ * Object type identifier, always "list"
+ */
object: 'list';
}
export namespace ResponseListResponse {
+ /**
+ * OpenAI response object extended with input context information.
+ */
export interface Data {
+ /**
+ * Unique identifier for this response
+ */
id: string;
+ /**
+ * Unix timestamp when the response was created
+ */
created_at: number;
+ /**
+ * List of input items that led to this response
+ */
input: Array<
| Data.OpenAIResponseOutputMessageWebSearchToolCall
| Data.OpenAIResponseOutputMessageFileSearchToolCall
@@ -746,10 +1648,19 @@ export namespace ResponseListResponse {
| Data.OpenAIResponseMessage
>;
+ /**
+ * Model identifier used for generation
+ */
model: string;
+ /**
+ * Object type identifier, always "response"
+ */
object: 'response';
+ /**
+ * List of generated output items (messages, tool calls, etc.)
+ */
output: Array<
| Data.OpenAIResponseMessage
| Data.OpenAIResponseOutputMessageWebSearchToolCall
@@ -759,57 +1670,135 @@ export namespace ResponseListResponse {
| Data.OpenAIResponseOutputMessageMcpListTools
>;
+ /**
+ * Whether tool calls can be executed in parallel
+ */
parallel_tool_calls: boolean;
+ /**
+ * Current status of the response generation
+ */
status: string;
+ /**
+ * Text formatting configuration for the response
+ */
text: Data.Text;
+ /**
+ * (Optional) Error details if the response generation failed
+ */
error?: Data.Error;
+ /**
+ * (Optional) ID of the previous response in a conversation
+ */
previous_response_id?: string;
+ /**
+ * (Optional) Sampling temperature used for generation
+ */
temperature?: number;
+ /**
+ * (Optional) Nucleus sampling parameter used for generation
+ */
top_p?: number;
+ /**
+ * (Optional) Truncation strategy applied to the response
+ */
truncation?: string;
+ /**
+ * (Optional) User identifier associated with the request
+ */
user?: string;
}
export namespace Data {
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
queries: Array<string>;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
@@ -853,25 +1842,132 @@ export namespace ResponseListResponse {
}
export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
text: string;
+ /**
+ * Content type identifier, always "input_text"
+ */
type: 'input_text';
}
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
detail: 'low' | 'high' | 'auto';
+ /**
+ * Content type identifier, always "input_image"
+ */
type: 'input_image';
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
text: string;
type: 'output_text';
}
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
}
/**
@@ -888,117 +1984,329 @@ export namespace ResponseListResponse {
>
| Array;
- role: 'system' | 'developer' | 'user' | 'assistant';
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
- type: 'message';
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
- id?: string;
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
- status?: string;
- }
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
- export namespace OpenAIResponseMessage {
- export interface OpenAIResponseInputMessageContentText {
- text: string;
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
- type: 'input_text';
- }
+ end_index: number;
- export interface OpenAIResponseInputMessageContentImage {
- detail: 'low' | 'high' | 'auto';
+ file_id: string;
- type: 'input_image';
+ filename: string;
- image_url?: string;
- }
+ start_index: number;
- export interface UnionMember2 {
- text: string;
+ type: 'container_file_citation';
+ }
- type: 'output_text';
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
}
}
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
 queries: Array<string>;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array<string> | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
id: string;
+ /**
+ * JSON string containing the MCP call arguments
+ */
arguments: string;
+ /**
+ * Name of the MCP method being called
+ */
name: string;
+ /**
+ * Label identifying the MCP server handling the call
+ */
server_label: string;
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
type: 'mcp_call';
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
error?: string;
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
output?: string;
}
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
id: string;
+ /**
+ * Label identifying the MCP server providing the tools
+ */
server_label: string;
+ /**
+ * List of available tools provided by the MCP server
+ */
 tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
type: 'mcp_list_tools';
}
export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
export interface Tool {
- input_schema: Record | unknown | null>;
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<string> | unknown | null };
+ /**
+ * Name of the tool
+ */
name: string;
+ /**
+ * (Optional) Description of what the tool does
+ */
description?: string;
}
}
+ /**
+ * Text formatting configuration for the response
+ */
export interface Text {
/**
- * Configuration for Responses API text format.
+ * (Optional) Text format configuration specifying output format requirements
*/
format?: Text.Format;
}
export namespace Text {
/**
- * Configuration for Responses API text format.
+ * (Optional) Text format configuration specifying output format requirements
*/
export interface Format {
/**
@@ -1020,7 +2328,7 @@ export namespace ResponseListResponse {
* The JSON schema the response should conform to. In a Python SDK, this is often a
* `pydantic` model. Only used for json_schema.
*/
- schema?: Record | unknown | null>;
+ schema?: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* (Optional) Whether to strictly enforce the JSON schema. If true, the response
@@ -1030,9 +2338,18 @@ export namespace ResponseListResponse {
}
}
+ /**
+ * (Optional) Error details if the response generation failed
+ */
export interface Error {
+ /**
+ * Error code identifying the type of failure
+ */
code: string;
+ /**
+ * Human-readable error message describing the failure
+ */
message: string;
}
}
@@ -1076,6 +2393,9 @@ export interface ResponseCreateParamsBase {
temperature?: number;
+ /**
+ * Text response configuration for OpenAI responses.
+ */
text?: ResponseCreateParams.Text;
tools?: Array<
@@ -1087,37 +2407,88 @@ export interface ResponseCreateParamsBase {
}
export namespace ResponseCreateParams {
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * Current status of the web search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
type: 'web_search_call';
}
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
id: string;
+ /**
+ * List of search queries executed
+ */
 queries: Array<string>;
+ /**
+ * Current status of the file search operation
+ */
status: string;
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
type: 'file_search_call';
- results?: Array | unknown | null>>;
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<{ [key: string]: boolean | number | string | Array<string> | unknown | null }>;
}
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
arguments: string;
+ /**
+ * Unique identifier for the function call
+ */
call_id: string;
+ /**
+ * Name of the function being called
+ */
name: string;
+ /**
+ * Tool call type identifier, always "function_call"
+ */
type: 'function_call';
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
id?: string;
+ /**
+ * (Optional) Current status of the function call execution
+ */
status?: string;
}
@@ -1161,37 +2532,147 @@ export namespace ResponseCreateParams {
}
export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
text: string;
+ /**
+ * Content type identifier, always "input_text"
+ */
type: 'input_text';
}
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
detail: 'low' | 'high' | 'auto';
+ /**
+ * Content type identifier, always "input_image"
+ */
type: 'input_image';
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
text: string;
type: 'output_text';
}
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
}
+ /**
+ * Text response configuration for OpenAI responses.
+ */
export interface Text {
/**
- * Configuration for Responses API text format.
+ * (Optional) Text format configuration specifying output format requirements
*/
format?: Text.Format;
}
export namespace Text {
/**
- * Configuration for Responses API text format.
+ * (Optional) Text format configuration specifying output format requirements
*/
export interface Format {
/**
@@ -1213,7 +2694,7 @@ export namespace ResponseCreateParams {
* The JSON schema the response should conform to. In a Python SDK, this is often a
* `pydantic` model. Only used for json_schema.
*/
- schema?: Record | unknown | null>;
+ schema?: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* (Optional) Whether to strictly enforce the JSON schema. If true, the response
@@ -1223,66 +2704,156 @@ export namespace ResponseCreateParams {
}
}
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
export interface OpenAIResponseInputToolWebSearch {
- type: 'web_search' | 'web_search_preview_2025_03_11';
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
search_context_size?: string;
}
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
type: 'file_search';
+ /**
+ * List of vector store identifiers to search within
+ */
 vector_store_ids: Array<string>;
- filters?: Record | unknown | null>;
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array<string> | unknown | null };
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
max_num_results?: number;
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
}
export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
ranker?: string;
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
score_threshold?: number;
}
}
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
name: string;
+ /**
+ * Tool type identifier, always "function"
+ */
type: 'function';
+ /**
+ * (Optional) Description of what the function does
+ */
description?: string;
- parameters?: Record | unknown | null>;
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array<string> | unknown | null };
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
strict?: boolean;
}
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
+ */
export interface OpenAIResponseInputToolMcp {
+ /**
+ * Approval requirement for tool calls ("always", "never", or filter)
+ */
require_approval: 'always' | 'never' | OpenAIResponseInputToolMcp.ApprovalFilter;
+ /**
+ * Label to identify this MCP server
+ */
server_label: string;
+ /**
+ * URL endpoint of the MCP server
+ */
server_url: string;
+ /**
+ * Tool type identifier, always "mcp"
+ */
type: 'mcp';
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
 allowed_tools?: Array<string> | OpenAIResponseInputToolMcp.AllowedToolsFilter;
- headers?: Record | unknown | null>;
+ /**
+ * (Optional) HTTP headers to include when connecting to the server
+ */
+ headers?: { [key: string]: boolean | number | string | Array<string> | unknown | null };
}
export namespace OpenAIResponseInputToolMcp {
+ /**
+ * Filter configuration for MCP tool approval requirements.
+ */
export interface ApprovalFilter {
+ /**
+ * (Optional) List of tool names that always require approval
+ */
 always?: Array<string>;
+ /**
+ * (Optional) List of tool names that never require approval
+ */
 never?: Array<string>;
}
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
 tool_names?: Array<string>;
}
}
diff --git a/src/resources/routes.ts b/src/resources/routes.ts
index f5c533e..98d5dfe 100644
--- a/src/resources/routes.ts
+++ b/src/resources/routes.ts
@@ -6,7 +6,7 @@ import * as InspectAPI from './inspect';
export class Routes extends APIResource {
/**
- * List all routes.
+ * List all available API routes with their methods and implementing providers.
*/
 list(options?: Core.RequestOptions): Core.APIPromise<RouteListResponse> {
return (
@@ -15,10 +15,19 @@ export class Routes extends APIResource {
}
}
+/**
+ * Response containing a list of all available API routes.
+ */
export interface ListRoutesResponse {
+ /**
+ * List of available route information objects
+ */
data: RouteListResponse;
}
+/**
+ * List of available route information objects
+ */
 export type RouteListResponse = Array<InspectAPI.RouteInfo>;
export declare namespace Routes {
diff --git a/src/resources/safety.ts b/src/resources/safety.ts
index a701496..d41b2c7 100644
--- a/src/resources/safety.ts
+++ b/src/resources/safety.ts
@@ -13,7 +13,13 @@ export class Safety extends APIResource {
}
}
+/**
+ * Response from running a safety shield.
+ */
export interface RunShieldResponse {
+ /**
+ * (Optional) Safety violation detected by the shield, if any
+ */
violation?: Shared.SafetyViolation;
}
@@ -26,7 +32,7 @@ export interface SafetyRunShieldParams {
/**
* The parameters of the shield.
*/
- params: Record | unknown | null>;
+ params: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* The identifier of the shield to run.
diff --git a/src/resources/scoring-functions.ts b/src/resources/scoring-functions.ts
index bc79d19..e4e3c33 100644
--- a/src/resources/scoring-functions.ts
+++ b/src/resources/scoring-functions.ts
@@ -39,59 +39,110 @@ export interface ListScoringFunctionsResponse {
data: ScoringFunctionListResponse;
}
+/**
+ * A scoring function resource for evaluating model outputs.
+ */
export interface ScoringFn {
identifier: string;
- metadata: Record | unknown | null>;
+ metadata: { [key: string]: boolean | number | string | Array<string> | unknown | null };
provider_id: string;
return_type: Shared.ReturnType;
+ /**
+ * The resource type, always scoring_function
+ */
type: 'scoring_function';
description?: string;
+ /**
+ * Parameters for LLM-as-judge scoring function configuration.
+ */
params?: ScoringFnParams;
provider_resource_id?: string;
}
+/**
+ * Parameters for LLM-as-judge scoring function configuration.
+ */
export type ScoringFnParams =
| ScoringFnParams.LlmAsJudgeScoringFnParams
| ScoringFnParams.RegexParserScoringFnParams
| ScoringFnParams.BasicScoringFnParams;
export namespace ScoringFnParams {
+ /**
+ * Parameters for LLM-as-judge scoring function configuration.
+ */
export interface LlmAsJudgeScoringFnParams {
+ /**
+ * Aggregation functions to apply to the scores of each row
+ */
aggregation_functions: Array<
'average' | 'weighted_average' | 'median' | 'categorical_count' | 'accuracy'
>;
+ /**
+ * Identifier of the LLM model to use as a judge for scoring
+ */
judge_model: string;
+ /**
+ * Regexes to extract the answer from generated response
+ */
 judge_score_regexes: Array<string>;
+ /**
+ * The type of scoring function parameters, always llm_as_judge
+ */
type: 'llm_as_judge';
+ /**
+ * (Optional) Custom prompt template for the judge model
+ */
prompt_template?: string;
}
+ /**
+ * Parameters for regex parser scoring function configuration.
+ */
export interface RegexParserScoringFnParams {
+ /**
+ * Aggregation functions to apply to the scores of each row
+ */
aggregation_functions: Array<
'average' | 'weighted_average' | 'median' | 'categorical_count' | 'accuracy'
>;
+ /**
+ * Regex to extract the answer from generated response
+ */
 parsing_regexes: Array<string>;
+ /**
+ * The type of scoring function parameters, always regex_parser
+ */
type: 'regex_parser';
}
+ /**
+ * Parameters for basic scoring function configuration.
+ */
export interface BasicScoringFnParams {
+ /**
+ * Aggregation functions to apply to the scores of each row
+ */
aggregation_functions: Array<
'average' | 'weighted_average' | 'median' | 'categorical_count' | 'accuracy'
>;
+ /**
+ * The type of scoring function parameters, always basic
+ */
type: 'basic';
}
}
diff --git a/src/resources/scoring.ts b/src/resources/scoring.ts
index 76a9507..c2e569e 100644
--- a/src/resources/scoring.ts
+++ b/src/resources/scoring.ts
@@ -31,12 +31,21 @@ export interface ScoringScoreResponse {
/**
* A map of scoring function name to ScoringResult.
*/
- results: Record;
+ results: { [key: string]: Shared.ScoringResult };
}
+/**
+ * Response from batch scoring operations on datasets.
+ */
export interface ScoringScoreBatchResponse {
- results: Record;
+ /**
+ * A map of scoring function name to ScoringResult
+ */
+ results: { [key: string]: Shared.ScoringResult };
+ /**
+ * (Optional) The identifier of the dataset that was scored
+ */
dataset_id?: string;
}
@@ -44,12 +53,12 @@ export interface ScoringScoreParams {
/**
* The rows to score.
*/
- input_rows: Array | unknown | null>>;
+ input_rows: Array<{ [key: string]: boolean | number | string | Array<string> | unknown | null }>;
/**
* The scoring functions to use for the scoring.
*/
- scoring_functions: Record;
+ scoring_functions: { [key: string]: ScoringFunctionsAPI.ScoringFnParams | null };
}
export interface ScoringScoreBatchParams {
@@ -66,7 +75,7 @@ export interface ScoringScoreBatchParams {
/**
* The scoring functions to use for the scoring.
*/
- scoring_functions: Record;
+ scoring_functions: { [key: string]: ScoringFunctionsAPI.ScoringFnParams | null };
}
export declare namespace Scoring {
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 6f27b97..7f6fa8b 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -99,13 +99,19 @@ export namespace AgentConfig {
}
export interface AgentToolGroupWithArgs {
- args: Record | unknown | null>;
+ args: { [key: string]: boolean | number | string | Array<string> | unknown | null };
name: string;
}
}
+/**
+ * Response from a batch completion request.
+ */
export interface BatchCompletion {
+ /**
+ * List of completion responses, one for each input in the batch
+ */
 batch: Array<CompletionResponse>;
}
@@ -123,15 +129,30 @@ export interface ChatCompletionResponse {
*/
 logprobs?: Array<TokenLogProbs>;
+ /**
+ * (Optional) List of metrics associated with the API response
+ */
metrics?: Array;
}
export namespace ChatCompletionResponse {
+ /**
+ * A metric value included in API responses.
+ */
export interface Metric {
+ /**
+ * The name of the metric
+ */
metric: string;
+ /**
+ * The numeric value of the metric
+ */
value: number;
+ /**
+ * (Optional) The unit of measurement for the metric value
+ */
unit?: string;
}
}
@@ -166,26 +187,59 @@ export interface CompletionMessage {
 tool_calls?: Array<ToolCall>;
}
+/**
+ * A text content delta for streaming responses.
+ */
export type ContentDelta = ContentDelta.TextDelta | ContentDelta.ImageDelta | ContentDelta.ToolCallDelta;
export namespace ContentDelta {
+ /**
+ * A text content delta for streaming responses.
+ */
export interface TextDelta {
+ /**
+ * The incremental text content
+ */
text: string;
+ /**
+ * Discriminator type of the delta. Always "text"
+ */
type: 'text';
}
+ /**
+ * An image content delta for streaming responses.
+ */
export interface ImageDelta {
+ /**
+ * The incremental image data as bytes
+ */
image: string;
+ /**
+ * Discriminator type of the delta. Always "image"
+ */
type: 'image';
}
+ /**
+ * A tool call content delta for streaming responses.
+ */
export interface ToolCallDelta {
+ /**
+ * Current parsing status of the tool call
+ */
parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded';
+ /**
+ * Either an in-progress tool call string or the final parsed tool call
+ */
tool_call: Shared.ToolCallOrString;
+ /**
+ * Discriminator type of the delta. Always "tool_call"
+ */
type: 'tool_call';
}
}
@@ -212,7 +266,7 @@ export interface Document {
/**
* Additional metadata for the document.
*/
- metadata: Record | unknown | null>;
+ metadata: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* The MIME type of the document.
@@ -259,6 +313,9 @@ export namespace Document {
* Note that URL could have length limits.
*/
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -279,7 +336,13 @@ export namespace Document {
type: 'text';
}
+ /**
+ * A URL reference to external content.
+ */
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -332,6 +395,9 @@ export namespace InterleavedContent {
* Note that URL could have length limits.
*/
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -399,6 +465,9 @@ export namespace InterleavedContentItem {
* Note that URL could have length limits.
*/
export interface URL {
+ /**
+ * The URL string pointing to the resource
+ */
uri: string;
}
}
@@ -425,6 +494,9 @@ export namespace InterleavedContentItem {
*/
export type Message = UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage;
+/**
+ * Parameter type for string values.
+ */
export type ParamType =
| ParamType.StringType
| ParamType.NumberType
@@ -438,43 +510,103 @@ export type ParamType =
| ParamType.AgentTurnInputType;
export namespace ParamType {
+ /**
+ * Parameter type for string values.
+ */
export interface StringType {
+ /**
+ * Discriminator type. Always "string"
+ */
type: 'string';
}
+ /**
+ * Parameter type for numeric values.
+ */
export interface NumberType {
+ /**
+ * Discriminator type. Always "number"
+ */
type: 'number';
}
+ /**
+ * Parameter type for boolean values.
+ */
export interface BooleanType {
+ /**
+ * Discriminator type. Always "boolean"
+ */
type: 'boolean';
}
+ /**
+ * Parameter type for array values.
+ */
export interface ArrayType {
+ /**
+ * Discriminator type. Always "array"
+ */
type: 'array';
}
+ /**
+ * Parameter type for object values.
+ */
export interface ObjectType {
+ /**
+ * Discriminator type. Always "object"
+ */
type: 'object';
}
+ /**
+ * Parameter type for JSON values.
+ */
export interface JsonType {
+ /**
+ * Discriminator type. Always "json"
+ */
type: 'json';
}
+ /**
+ * Parameter type for union values.
+ */
export interface UnionType {
+ /**
+ * Discriminator type. Always "union"
+ */
type: 'union';
}
+ /**
+ * Parameter type for chat completion input.
+ */
export interface ChatCompletionInputType {
+ /**
+ * Discriminator type. Always "chat_completion_input"
+ */
type: 'chat_completion_input';
}
+ /**
+ * Parameter type for completion input.
+ */
export interface CompletionInputType {
+ /**
+ * Discriminator type. Always "completion_input"
+ */
type: 'completion_input';
}
+ /**
+ * Parameter type for agent turn input.
+ */
export interface AgentTurnInputType {
+ /**
+ * Discriminator type. Always "agent_turn_input"
+ */
type: 'agent_turn_input';
}
}
@@ -510,7 +642,7 @@ export interface QueryConfig {
* Search mode for retrieval—either "vector", "keyword", or "hybrid". Default
* "vector".
*/
- mode?: string;
+ mode?: 'vector' | 'keyword' | 'hybrid';
/**
* Configuration for the ranker to use in hybrid search. Defaults to RRF ranker.
@@ -525,8 +657,7 @@ export namespace QueryConfig {
export interface RrfRanker {
/**
* The impact factor for RRF scoring. Higher values give more weight to
- * higher-ranked results. Must be greater than 0. Default of 60 is from the
- * original RRF paper (Cormack et al., 2009).
+ * higher-ranked results. Must be greater than 0
*/
impact_factor: number;
@@ -553,31 +684,61 @@ export namespace QueryConfig {
}
}
+/**
+ * Configuration for the default RAG query generator.
+ */
export type QueryGeneratorConfig =
| QueryGeneratorConfig.DefaultRagQueryGeneratorConfig
| QueryGeneratorConfig.LlmragQueryGeneratorConfig;
export namespace QueryGeneratorConfig {
+ /**
+ * Configuration for the default RAG query generator.
+ */
export interface DefaultRagQueryGeneratorConfig {
+ /**
+ * String separator used to join query terms
+ */
separator: string;
+ /**
+ * Type of query generator, always 'default'
+ */
type: 'default';
}
+ /**
+ * Configuration for the LLM-based RAG query generator.
+ */
export interface LlmragQueryGeneratorConfig {
+ /**
+ * Name of the language model to use for query generation
+ */
model: string;
+ /**
+ * Template string for formatting the query generation prompt
+ */
template: string;
+ /**
+ * Type of query generator, always 'llm'
+ */
type: 'llm';
}
}
+/**
+ * Result of a RAG query containing retrieved content and metadata.
+ */
export interface QueryResult {
- metadata: Record | unknown | null>;
+ /**
+ * Additional metadata about the query result
+ */
+ metadata: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
- * A image content item
+ * (Optional) The retrieved content from the query
*/
content?: InterleavedContent;
}
@@ -596,7 +757,7 @@ export namespace ResponseFormat {
* The JSON schema the response should conform to. In a Python SDK, this is often a
* `pydantic` model.
*/
- json_schema: Record | unknown | null>;
+ json_schema: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* Must be "json_schema" to identify this format type
@@ -611,7 +772,7 @@ export namespace ResponseFormat {
/**
* The BNF grammar specification the response should conform to
*/
- bnf: Record | unknown | null>;
+ bnf: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* Must be "grammar" to identify this format type
@@ -634,11 +795,24 @@ export interface ReturnType {
| 'agent_turn_input';
}
+/**
+ * Details of a safety violation detected by content moderation.
+ */
export interface SafetyViolation {
- metadata: Record | unknown | null>;
+ /**
+ * Additional metadata including specific violation codes for debugging and
+ * telemetry
+ */
+ metadata: { [key: string]: boolean | number | string | Array<string> | unknown | null };
+ /**
+ * Severity level of the violation
+ */
violation_level: 'info' | 'warn' | 'error';
+ /**
+ * (Optional) Message to convey to the user about the violation
+ */
user_message?: string;
}
@@ -675,21 +849,50 @@ export interface SamplingParams {
}
export namespace SamplingParams {
+ /**
+ * Greedy sampling strategy that selects the highest probability token at each
+ * step.
+ */
export interface GreedySamplingStrategy {
+ /**
+ * Must be "greedy" to identify this sampling strategy
+ */
type: 'greedy';
}
+ /**
+ * Top-p (nucleus) sampling strategy that samples from the smallest set of tokens
+ * with cumulative probability >= p.
+ */
export interface TopPSamplingStrategy {
+ /**
+ * Must be "top_p" to identify this sampling strategy
+ */
type: 'top_p';
+ /**
+ * Controls randomness in sampling. Higher values increase randomness
+ */
temperature?: number;
+ /**
+ * Cumulative probability threshold for nucleus sampling. Defaults to 0.95
+ */
top_p?: number;
}
+ /**
+ * Top-k sampling strategy that restricts sampling to the k most likely tokens.
+ */
export interface TopKSamplingStrategy {
+ /**
+ * Number of top tokens to consider for sampling. Must be at least 1
+ */
top_k: number;
+ /**
+ * Must be "top_k" to identify this sampling strategy
+ */
type: 'top_k';
}
}
@@ -701,12 +904,12 @@ export interface ScoringResult {
/**
* Map of metric name to aggregated value
*/
- aggregated_results: Record | unknown | null>;
+ aggregated_results: { [key: string]: boolean | number | string | Array<string> | unknown | null };
/**
* The scoring result for each row. Each row is a map of column name to value.
*/
- score_rows: Array | unknown | null>>;
+ score_rows: Array<{ [key: string]: boolean | number | string | Array<string> | unknown | null }>;
}
/**
@@ -729,15 +932,15 @@ export interface SystemMessage {
export interface ToolCall {
arguments:
| string
- | Record<
- string,
- | string
- | number
- | boolean
- | Array<string | number | boolean | null>
- | Record<string, string | number | boolean | null>
- | null
- >;
+ | {
+ [key: string]:
+ | string
+ | number
+ | boolean
+ | Array<string | number | boolean | null>
+ | { [key: string]: string | number | boolean | null }
+ | null;
+ };
call_id: string;
@@ -746,6 +949,9 @@ export interface ToolCall {
arguments_json?: string;
}
+/**
+ * Either an in-progress tool call string or the final parsed tool call
+ */
export type ToolCallOrString = string | ToolCall;
export interface ToolParamDefinition {
diff --git a/src/resources/shields.ts b/src/resources/shields.ts
index 6dac0e2..d72afe9 100644
--- a/src/resources/shields.ts
+++ b/src/resources/shields.ts
@@ -33,16 +33,22 @@ export interface ListShieldsResponse {
}
/**
- * A safety shield resource that can be used to check content
+ * A safety shield resource that can be used to check content.
*/
export interface Shield {
identifier: string;
provider_id: string;
+ /**
+ * The resource type, always shield
+ */
type: 'shield';
- params?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Configuration parameters for the shield
+ */
+ params?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
provider_resource_id?: string;
}
@@ -58,7 +64,7 @@ export interface ShieldRegisterParams {
/**
* The parameters of the shield.
*/
- params?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ params?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
* The identifier of the provider.
diff --git a/src/resources/synthetic-data-generation.ts b/src/resources/synthetic-data-generation.ts
index 4c47616..852eb81 100644
--- a/src/resources/synthetic-data-generation.ts
+++ b/src/resources/synthetic-data-generation.ts
@@ -5,6 +5,9 @@ import * as Core from '../core';
import * as Shared from './shared';
export class SyntheticDataGeneration extends APIResource {
+ /**
+ * Generate synthetic data based on input dialogs and apply filtering.
+ */
generate(
body: SyntheticDataGenerationGenerateParams,
options?: Core.RequestOptions,
@@ -18,19 +21,33 @@ export class SyntheticDataGeneration extends APIResource {
* tuples that pass the threshold.
*/
export interface SyntheticDataGenerationResponse {
- synthetic_data: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+ /**
+ * List of generated synthetic data samples that passed the filtering criteria
+ */
+ synthetic_data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
- statistics?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Statistical information about the generation process and filtering
+ * results
+ */
+ statistics?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
}
export interface SyntheticDataGenerationGenerateParams {
+ /**
+ * List of conversation messages to use as input for synthetic data generation
+ */
dialogs: Array<Shared.Message>;
/**
- * The type of filtering function.
+ * Type of filtering to apply to generated synthetic data samples
*/
filtering_function: 'none' | 'random' | 'top_k' | 'top_p' | 'top_k_top_p' | 'sigmoid';
+ /**
+ * (Optional) The identifier of the model to use. The model must be registered with
+ * Llama Stack and available via the /models endpoint
+ */
model?: string;
}
diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts
index 0a18495..3e47e3c 100644
--- a/src/resources/telemetry.ts
+++ b/src/resources/telemetry.ts
@@ -91,152 +91,366 @@ export class Telemetry extends APIResource {
}
}
+/**
+ * An unstructured log event containing a simple text message.
+ */
export type Event = Event.UnstructuredLogEvent | Event.MetricEvent | Event.StructuredLogEvent;
export namespace Event {
+ /**
+ * An unstructured log event containing a simple text message.
+ */
export interface UnstructuredLogEvent {
+ /**
+ * The log message text
+ */
message: string;
+ /**
+ * The severity level of the log message
+ */
severity: 'verbose' | 'debug' | 'info' | 'warn' | 'error' | 'critical';
+ /**
+ * Unique identifier for the span this event belongs to
+ */
span_id: string;
+ /**
+ * Timestamp when the event occurred
+ */
timestamp: string;
+ /**
+ * Unique identifier for the trace this event belongs to
+ */
trace_id: string;
+ /**
+ * Event type identifier set to UNSTRUCTURED_LOG
+ */
type: 'unstructured_log';
- attributes?: Record<string, string | number | boolean | null>;
+ /**
+ * (Optional) Key-value pairs containing additional metadata about the event
+ */
+ attributes?: { [key: string]: string | number | boolean | null };
}
+ /**
+ * A metric event containing a measured value.
+ */
export interface MetricEvent {
+ /**
+ * The name of the metric being measured
+ */
metric: string;
+ /**
+ * Unique identifier for the span this event belongs to
+ */
span_id: string;
+ /**
+ * Timestamp when the event occurred
+ */
timestamp: string;
+ /**
+ * Unique identifier for the trace this event belongs to
+ */
trace_id: string;
+ /**
+ * Event type identifier set to METRIC
+ */
type: 'metric';
+ /**
+ * The unit of measurement for the metric value
+ */
unit: string;
+ /**
+ * The numeric value of the metric measurement
+ */
value: number;
- attributes?: Record<string, string | number | boolean | null>;
+ /**
+ * (Optional) Key-value pairs containing additional metadata about the event
+ */
+ attributes?: { [key: string]: string | number | boolean | null };
}
+ /**
+ * A structured log event containing typed payload data.
+ */
export interface StructuredLogEvent {
+ /**
+ * The structured payload data for the log event
+ */
payload: StructuredLogEvent.SpanStartPayload | StructuredLogEvent.SpanEndPayload;
+ /**
+ * Unique identifier for the span this event belongs to
+ */
span_id: string;
+ /**
+ * Timestamp when the event occurred
+ */
timestamp: string;
+ /**
+ * Unique identifier for the trace this event belongs to
+ */
trace_id: string;
+ /**
+ * Event type identifier set to STRUCTURED_LOG
+ */
type: 'structured_log';
- attributes?: Record<string, string | number | boolean | null>;
+ /**
+ * (Optional) Key-value pairs containing additional metadata about the event
+ */
+ attributes?: { [key: string]: string | number | boolean | null };
}
export namespace StructuredLogEvent {
+ /**
+ * Payload for a span start event.
+ */
export interface SpanStartPayload {
+ /**
+ * Human-readable name describing the operation this span represents
+ */
name: string;
+ /**
+ * Payload type identifier set to SPAN_START
+ */
type: 'span_start';
+ /**
+ * (Optional) Unique identifier for the parent span, if this is a child span
+ */
parent_span_id?: string;
}
+ /**
+ * Payload for a span end event.
+ */
export interface SpanEndPayload {
+ /**
+ * The final status of the span indicating success or failure
+ */
status: 'ok' | 'error';
+ /**
+ * Payload type identifier set to SPAN_END
+ */
type: 'span_end';
}
}
}
+/**
+ * A condition for filtering query results.
+ */
export interface QueryCondition {
+ /**
+ * The attribute key to filter on
+ */
key: string;
+ /**
+ * The comparison operator to apply
+ */
op: 'eq' | 'ne' | 'gt' | 'lt';
+ /**
+ * The value to compare against
+ */
value: boolean | number | string | Array<unknown> | unknown | null;
}
+/**
+ * Response containing a list of spans.
+ */
export interface QuerySpansResponse {
+ /**
+ * List of spans matching the query criteria
+ */
data: TelemetryQuerySpansResponse;
}
+/**
+ * A span that includes status information.
+ */
export interface SpanWithStatus {
+ /**
+ * Human-readable name describing the operation this span represents
+ */
name: string;
+ /**
+ * Unique identifier for the span
+ */
span_id: string;
+ /**
+ * Timestamp when the operation began
+ */
start_time: string;
+ /**
+ * Unique identifier for the trace this span belongs to
+ */
trace_id: string;
- attributes?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Key-value pairs containing additional metadata about the span
+ */
+ attributes?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * (Optional) Timestamp when the operation finished, if completed
+ */
end_time?: string;
+ /**
+ * (Optional) Unique identifier for the parent span, if this is a child span
+ */
parent_span_id?: string;
+ /**
+ * (Optional) The current status of the span
+ */
status?: 'ok' | 'error';
}
+/**
+ * A trace representing the complete execution path of a request across multiple
+ * operations.
+ */
export interface Trace {
+ /**
+ * Unique identifier for the root span that started this trace
+ */
root_span_id: string;
+ /**
+ * Timestamp when the trace began
+ */
start_time: string;
+ /**
+ * Unique identifier for the trace
+ */
trace_id: string;
+ /**
+ * (Optional) Timestamp when the trace finished, if completed
+ */
end_time?: string;
}
+/**
+ * A span representing a single operation within a trace.
+ */
export interface TelemetryGetSpanResponse {
+ /**
+ * Human-readable name describing the operation this span represents
+ */
name: string;
+ /**
+ * Unique identifier for the span
+ */
span_id: string;
+ /**
+ * Timestamp when the operation began
+ */
start_time: string;
+ /**
+ * Unique identifier for the trace this span belongs to
+ */
trace_id: string;
- attributes?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Key-value pairs containing additional metadata about the span
+ */
+ attributes?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * (Optional) Timestamp when the operation finished, if completed
+ */
end_time?: string;
+ /**
+ * (Optional) Unique identifier for the parent span, if this is a child span
+ */
parent_span_id?: string;
}
-export type TelemetryGetSpanTreeResponse = Record<string, SpanWithStatus>;
+/**
+ * Dictionary mapping span IDs to spans with status information
+ */
+export type TelemetryGetSpanTreeResponse = { [key: string]: SpanWithStatus };
+/**
+ * List of spans matching the query criteria
+ */
export type TelemetryQuerySpansResponse = Array<TelemetryQuerySpansResponse.TelemetryQuerySpansResponseItem>;
export namespace TelemetryQuerySpansResponse {
+ /**
+ * A span representing a single operation within a trace.
+ */
export interface TelemetryQuerySpansResponseItem {
+ /**
+ * Human-readable name describing the operation this span represents
+ */
name: string;
+ /**
+ * Unique identifier for the span
+ */
span_id: string;
+ /**
+ * Timestamp when the operation began
+ */
start_time: string;
+ /**
+ * Unique identifier for the trace this span belongs to
+ */
trace_id: string;
- attributes?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Key-value pairs containing additional metadata about the span
+ */
+ attributes?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * (Optional) Timestamp when the operation finished, if completed
+ */
end_time?: string;
+ /**
+ * (Optional) Unique identifier for the parent span, if this is a child span
+ */
parent_span_id?: string;
}
}
+/**
+ * List of traces matching the query criteria
+ */
export type TelemetryQueryTracesResponse = Array<Trace>;
export interface TelemetryGetSpanTreeParams {
diff --git a/src/resources/tool-runtime/rag-tool.ts b/src/resources/tool-runtime/rag-tool.ts
index 9bcd4b2..b9f6669 100644
--- a/src/resources/tool-runtime/rag-tool.ts
+++ b/src/resources/tool-runtime/rag-tool.ts
@@ -6,7 +6,7 @@ import * as Shared from '../shared';
export class RagTool extends APIResource {
/**
- * Index documents so they can be used by the RAG system
+ * Index documents so they can be used by the RAG system.
*/
insert(body: RagToolInsertParams, options?: Core.RequestOptions): Core.APIPromise<void> {
return this._client.post('/v1/tool-runtime/rag-tool/insert', {
@@ -17,7 +17,7 @@ export class RagTool extends APIResource {
}
/**
- * Query the RAG system for context; typically invoked by the agent
+ * Query the RAG system for context; typically invoked by the agent.
*/
query(body: RagToolQueryParams, options?: Core.RequestOptions): Core.APIPromise<Shared.QueryResult> {
return this._client.post('/v1/tool-runtime/rag-tool/query', { body, ...options });
@@ -25,23 +25,35 @@ export class RagTool extends APIResource {
}
export interface RagToolInsertParams {
+ /**
+ * (Optional) Size in tokens for document chunking during indexing
+ */
chunk_size_in_tokens: number;
+ /**
+ * List of documents to index in the RAG system
+ */
documents: Array<Shared.Document>;
+ /**
+ * ID of the vector database to store the document embeddings
+ */
vector_db_id: string;
}
export interface RagToolQueryParams {
/**
- * A image content item
+ * The query content to search for in the indexed documents
*/
content: Shared.InterleavedContent;
+ /**
+ * List of vector database IDs to search within
+ */
vector_db_ids: Array<string>;
/**
- * Configuration for the RAG query generation.
+ * (Optional) Configuration parameters for the query operation
*/
query_config?: Shared.QueryConfig;
}
diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts
index 0bcb705..ca1a6c8 100644
--- a/src/resources/tool-runtime/tool-runtime.ts
+++ b/src/resources/tool-runtime/tool-runtime.ts
@@ -43,50 +43,98 @@ export class ToolRuntime extends APIResource {
}
}
+/**
+ * Tool definition used in runtime contexts.
+ */
export interface ToolDef {
+ /**
+ * Name of the tool
+ */
name: string;
+ /**
+ * (Optional) Human-readable description of what the tool does
+ */
description?: string;
- metadata?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+ /**
+ * (Optional) Additional metadata about the tool
+ */
+ metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+ /**
+ * (Optional) List of parameters this tool accepts
+ */
parameters?: Array