diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 763462f..43fd5a7 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -9,9 +9,7 @@
"postCreateCommand": "yarn install",
"customizations": {
"vscode": {
- "extensions": [
- "esbenp.prettier-vscode"
- ]
+ "extensions": ["esbenp.prettier-vscode"]
}
}
}
diff --git a/.gitignore b/.gitignore
index d98d51a..2412bb7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@ dist
dist-deno
/*.tgz
.idea/
+.eslintcache
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ed9acd2..1ae2526 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.23-alpha.1"
+ ".": "0.3.0-alpha.1"
}
diff --git a/.stats.yml b/.stats.yml
index fa9edfc..bf6aaff 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml
-openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3
-config_hash: 0358112cc0f3d880b4d55debdbe1cfa3
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-15a929a0b71de779accc56bd09d1e5f580e216affdb408cf9890bc7a37847e9e.yml
+openapi_spec_hash: 5db9f7c7e80427cfa0298cbb01689559
+config_hash: 06758df5c4f261f9c97eafcef7e0028f
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e40a318..2d99f43 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,57 @@
# Changelog
+## 0.3.0-alpha.1 (2025-10-13)
+
+Full Changelog: [v0.2.23-alpha.1...v0.3.0-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.23-alpha.1...v0.3.0-alpha.1)
+
+### ⚠ BREAKING CHANGES
+
+* **api:** use input_schema instead of parameters for tools
+* **api:** fixes to remove deprecated inference resources
+
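Of the two, the tool-definition change is the one most callers will hit. A hypothetical before/after sketch — the legacy `parameters`/`param_type` layout and the JSON-Schema `input_schema` replacement are inferred from the commit titles and the removed `ToolParamDefinition` type, not spelled out in this changelog:

```ts
// Before: per-parameter definitions under `parameters` (assumed legacy layout).
const legacyTool = {
  tool_name: 'get_weather',
  description: 'Get the weather for a city',
  parameters: { city: { param_type: 'string', required: true } },
};

// After: a single standard JSON Schema under `input_schema`.
const tool = {
  name: 'get_weather',
  description: 'Get the weather for a city',
  input_schema: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  },
};
```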
+### Features
+
+* **api:** expires_after changes for /files ([a0b0fb7](https://github.com/llamastack/llama-stack-client-typescript/commit/a0b0fb7aa74668f3f6996c178f9654723b8b0f22))
+* **api:** fix file batches SDK to list_files ([25a0f10](https://github.com/llamastack/llama-stack-client-typescript/commit/25a0f10cffa7de7f1457d65c97259911bc70ab0a))
+* **api:** fixes to remove deprecated inference resources ([367d775](https://github.com/llamastack/llama-stack-client-typescript/commit/367d775c3d5a2fd85bf138d2b175e91b7c185913))
+* **api:** fixes to URLs ([e4f7840](https://github.com/llamastack/llama-stack-client-typescript/commit/e4f78407f74f3ba7597de355c314e1932dd94761))
+* **api:** move post_training and eval under alpha namespace ([aec1d5f](https://github.com/llamastack/llama-stack-client-typescript/commit/aec1d5ff198473ba736bf543ad00c6626cab9b81))
+* **api:** moving { rerank, agents } to `client.alpha.` ([793e069](https://github.com/llamastack/llama-stack-client-typescript/commit/793e0694d75c2af4535bf991d5858cd1f21300b4))
+* **api:** removing openai/v1 ([b5432de](https://github.com/llamastack/llama-stack-client-typescript/commit/b5432de2ad56ff0d2fd5a5b8e1755b5237616b60))
+* **api:** SDKs for vector store file batches ([b0676c8](https://github.com/llamastack/llama-stack-client-typescript/commit/b0676c837bbd835276fea3fe12f435afdbb75ef7))
+* **api:** SDKs for vector store file batches apis ([88731bf](https://github.com/llamastack/llama-stack-client-typescript/commit/88731bfecd6f548ae79cbe2a1125620e488c42a3))
+* **api:** several updates including Conversations, Responses changes, etc. ([e0728d5](https://github.com/llamastack/llama-stack-client-typescript/commit/e0728d5dd59be8723d9f967d6164351eb05528d1))
+* **api:** tool api (input_schema, etc.) changes ([06f2bca](https://github.com/llamastack/llama-stack-client-typescript/commit/06f2bcaf0df2e5d462cbe2d9ef3704ab0cfe9248))
+* **api:** updates to vector_store, etc. ([19535c2](https://github.com/llamastack/llama-stack-client-typescript/commit/19535c27147bf6f6861b807d9eeee471b5625148))
+* **api:** updating post /v1/files to have correct multipart/form-data ([f1cf9d6](https://github.com/llamastack/llama-stack-client-typescript/commit/f1cf9d68b6b2569dfb5ea3e2d2c33eff1a832e47))
+* **api:** use input_schema instead of parameters for tools ([8910a12](https://github.com/llamastack/llama-stack-client-typescript/commit/8910a121146aeddcb8f400101e6a2232245097e0))
+
+
+### Bug Fixes
+
+* **api:** another fix to capture correct responses.create() params ([6acae91](https://github.com/llamastack/llama-stack-client-typescript/commit/6acae910db289080e8f52864f1bdf6d7951d1c3b))
+* **api:** fix the ToolDefParam updates ([5cee3d6](https://github.com/llamastack/llama-stack-client-typescript/commit/5cee3d69650a4c827e12fc046c1d2ec3b2fa9126))
+* fix stream event model reference ([a71b421](https://github.com/llamastack/llama-stack-client-typescript/commit/a71b421152a609e49e76d01c6e4dd46eb3dbfae0))
+
+
+### Chores
+
+* extract some types in mcp docs ([dcc7bb8](https://github.com/llamastack/llama-stack-client-typescript/commit/dcc7bb8b4d940982c2e9c6d1a541636e99fdc5ff))
+* **internal:** codegen related update ([252e0a2](https://github.com/llamastack/llama-stack-client-typescript/commit/252e0a2a38bd8aedab91b401c440a9b10c056cec))
+* **internal:** codegen related update ([34da720](https://github.com/llamastack/llama-stack-client-typescript/commit/34da720c34c35dafb38775243d28dfbdce2497db))
+* **internal:** fix incremental formatting in some cases ([c5c8292](https://github.com/llamastack/llama-stack-client-typescript/commit/c5c8292b631c678efff5498bbab9f5a43bee50b6))
+* **internal:** use npm pack for build uploads ([a246793](https://github.com/llamastack/llama-stack-client-typescript/commit/a24679300cff93fea8ad4bc85e549ecc88198d58))
+
+
+### Documentation
+
+* update examples ([17b9eb3](https://github.com/llamastack/llama-stack-client-typescript/commit/17b9eb3c40957b63d2a71f7fc21944abcc720d80))
+
+
+### Build System
+
+* Bump version to 0.2.23 ([16e05ed](https://github.com/llamastack/llama-stack-client-typescript/commit/16e05ed9798233375e19098992632d223c3f5d8d))
+
## 0.2.23-alpha.1 (2025-09-26)
Full Changelog: [v0.2.19-alpha.1...v0.2.23-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.19-alpha.1...v0.2.23-alpha.1)
diff --git a/README.md b/README.md
index a27b8c1..c0f0665 100644
--- a/README.md
+++ b/README.md
@@ -41,13 +41,13 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-const stream = await client.inference.chatCompletion({
+const stream = await client.chat.completions.create({
messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
+ model: 'model',
stream: true,
});
-for await (const chatCompletionResponseStreamChunk of stream) {
- console.log(chatCompletionResponseStreamChunk.completion_message);
+for await (const chatCompletionChunk of stream) {
+ console.log(chatCompletionChunk);
}
```
@@ -64,11 +64,11 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-const params: LlamaStackClient.InferenceChatCompletionParams = {
+const params: LlamaStackClient.Chat.CompletionCreateParams = {
messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
+ model: 'model',
};
-const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion(
+const completion: LlamaStackClient.Chat.CompletionCreateResponse = await client.chat.completions.create(
params,
);
```
@@ -113,8 +113,8 @@ a subclass of `APIError` will be thrown:
```ts
-const chatCompletionResponse = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const completion = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.catch(async (err) => {
if (err instanceof LlamaStackClient.APIError) {
console.log(err.status); // 400
@@ -155,7 +155,7 @@ const client = new LlamaStackClient({
});
// Or, configure per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
maxRetries: 5,
});
```
@@ -172,7 +172,7 @@ const client = new LlamaStackClient({
});
// Override per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
timeout: 5 * 1000,
});
```
@@ -193,17 +193,17 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
```ts
const client = new LlamaStackClient();
-const response = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const response = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.asResponse();
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: chatCompletionResponse, response: raw } = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const { data: completion, response: raw } = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
-console.log(chatCompletionResponse.completion_message);
+console.log(completion);
```
### Making custom/undocumented requests
@@ -307,8 +307,8 @@ const client = new LlamaStackClient({
});
// Override per-request:
-await client.inference.chatCompletion(
- { messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' },
+await client.chat.completions.create(
+ { messages: [{ content: 'string', role: 'user' }], model: 'model' },
{
httpAgent: new http.Agent({ keepAlive: false }),
},
diff --git a/api.md b/api.md
index 01d88a5..fc7c09e 100644
--- a/api.md
+++ b/api.md
@@ -3,18 +3,13 @@
Types:
- AgentConfig
-- BatchCompletion
-- ChatCompletionResponse
- CompletionMessage
-- ContentDelta
- Document
- InterleavedContent
- InterleavedContentItem
- Message
-- Metric
- ParamType
- QueryConfig
-- QueryGeneratorConfig
- QueryResult
- ResponseFormat
- SafetyViolation
@@ -22,7 +17,6 @@ Types:
- ScoringResult
- SystemMessage
- ToolCall
-- ToolParamDefinition
- ToolResponseMessage
- UserMessage
@@ -45,14 +39,12 @@ Methods:
Types:
-- ListToolsResponse
-- Tool
- ToolListResponse
Methods:
- client.tools.list({ ...params }) -> ToolListResponse
-- client.tools.get(toolName) -> Tool
+- client.tools.get(toolName) -> ToolDef
# ToolRuntime
@@ -85,10 +77,10 @@ Types:
Methods:
-- client.responses.create({ ...params }) -> ResponseObject
-- client.responses.retrieve(responseId) -> ResponseObject
-- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
-- client.responses.delete(responseId) -> ResponseDeleteResponse
+- client.responses.create({ ...params }) -> ResponseObject
+- client.responses.retrieve(responseId) -> ResponseObject
+- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
+- client.responses.delete(responseId) -> ResponseDeleteResponse
## InputItems
@@ -98,67 +90,35 @@ Types:
Methods:
-- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
+- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
-# Agents
+# Conversations
Types:
-- InferenceStep
-- MemoryRetrievalStep
-- ShieldCallStep
-- ToolExecutionStep
-- ToolResponse
-- AgentCreateResponse
-- AgentRetrieveResponse
-- AgentListResponse
+- ConversationObject
+- ConversationDeleteResponse
Methods:
-- client.agents.create({ ...params }) -> AgentCreateResponse
-- client.agents.retrieve(agentId) -> AgentRetrieveResponse
-- client.agents.list({ ...params }) -> AgentListResponse
-- client.agents.delete(agentId) -> void
+- client.conversations.create({ ...params }) -> ConversationObject
+- client.conversations.retrieve(conversationId) -> ConversationObject
+- client.conversations.update(conversationId, { ...params }) -> ConversationObject
+- client.conversations.delete(conversationId) -> ConversationDeleteResponse
-## Session
+## Items
Types:
-- Session
-- SessionCreateResponse
-- SessionListResponse
+- ItemCreateResponse
+- ItemListResponse
+- ItemGetResponse
Methods:
-- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
-- client.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
-- client.agents.session.list(agentId, { ...params }) -> SessionListResponse
-- client.agents.session.delete(agentId, sessionId) -> void
-
-## Steps
-
-Types:
-
-- StepRetrieveResponse
-
-Methods:
-
-- client.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse
-
-## Turn
-
-Types:
-
-- AgentTurnResponseStreamChunk
-- Turn
-- TurnResponseEvent
-- TurnResponseEventPayload
-
-Methods:
-
-- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
-- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
-- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
+- client.conversations.items.create(conversationId, { ...params }) -> ItemCreateResponse
+- client.conversations.items.list(conversationId, { ...params }) -> ItemListResponse
+- client.conversations.items.get(conversationId, itemId) -> ItemGetResponse
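A minimal sketch of the new Conversations surface, assuming request/response shapes (`items`, message fields, `conversation.id`) that follow the OpenAI-compatible conversations API — only the method signatures come from this listing:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Create a conversation seeded with one user message (shape assumed).
const conversation = await client.conversations.create({
  items: [{ type: 'message', role: 'user', content: 'Hello!' }],
});

// List its items, then clean up.
const items = await client.conversations.items.list(conversation.id);
console.log(items);
await client.conversations.delete(conversation.id);
```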
# Datasets
@@ -172,36 +132,12 @@ Types:
Methods:
-- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
-- client.datasets.list() -> DatasetListResponse
-- client.datasets.appendrows(datasetId, { ...params }) -> void
-- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
-- client.datasets.register({ ...params }) -> DatasetRegisterResponse
-- client.datasets.unregister(datasetId) -> void
-
-# Eval
-
-Types:
-
-- BenchmarkConfig
-- EvalCandidate
-- EvaluateResponse
-- Job
-
-Methods:
-
-- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.runEval(benchmarkId, { ...params }) -> Job
-- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
-
-## Jobs
-
-Methods:
-
-- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
-- client.eval.jobs.cancel(benchmarkId, jobId) -> void
-- client.eval.jobs.status(benchmarkId, jobId) -> Job
+- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
+- client.datasets.list() -> DatasetListResponse
+- client.datasets.appendrows(datasetId, { ...params }) -> void
+- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
+- client.datasets.register({ ...params }) -> DatasetRegisterResponse
+- client.datasets.unregister(datasetId) -> void
# Inspect
@@ -217,26 +153,6 @@ Methods:
- client.inspect.health() -> HealthInfo
- client.inspect.version() -> VersionInfo
-# Inference
-
-Types:
-
-- ChatCompletionResponseStreamChunk
-- CompletionResponse
-- EmbeddingsResponse
-- TokenLogProbs
-- InferenceBatchChatCompletionResponse
-- InferenceRerankResponse
-
-Methods:
-
-- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse
-- client.inference.batchCompletion({ ...params }) -> BatchCompletion
-- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse
-- client.inference.completion({ ...params }) -> CompletionResponse
-- client.inference.embeddings({ ...params }) -> EmbeddingsResponse
-- client.inference.rerank({ ...params }) -> InferenceRerankResponse
-
# Embeddings
Types:
@@ -245,7 +161,7 @@ Types:
Methods:
-- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
+- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
# Chat
@@ -263,9 +179,9 @@ Types:
Methods:
-- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
-- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
-- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
+- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
+- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
+- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
# Completions
@@ -275,7 +191,7 @@ Types:
Methods:
-- client.completions.create({ ...params }) -> CompletionCreateResponse
+- client.completions.create({ ...params }) -> CompletionCreateResponse
# VectorIo
@@ -288,22 +204,6 @@ Methods:
- client.vectorIo.insert({ ...params }) -> void
- client.vectorIo.query({ ...params }) -> QueryChunksResponse
-# VectorDBs
-
-Types:
-
-- ListVectorDBsResponse
-- VectorDBRetrieveResponse
-- VectorDBListResponse
-- VectorDBRegisterResponse
-
-Methods:
-
-- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse
-- client.vectorDBs.list() -> VectorDBListResponse
-- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse
-- client.vectorDBs.unregister(vectorDBId) -> void
-
# VectorStores
Types:
@@ -315,12 +215,12 @@ Types:
Methods:
-- client.vectorStores.create({ ...params }) -> VectorStore
-- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
-- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
-- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
-- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
-- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
+- client.vectorStores.create({ ...params }) -> VectorStore
+- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
+- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
+- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
## Files
@@ -332,12 +232,26 @@ Types:
Methods:
-- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
-- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
-- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
-- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
+- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
+- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+
+## FileBatches
+
+Types:
+
+- ListVectorStoreFilesInBatchResponse
+- VectorStoreFileBatches
+
+Methods:
+
+- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage
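A minimal polling sketch for the batch lifecycle, assuming `file_ids`, `status`, and `id` fields in the OpenAI-compatible vector-store shape; only the method signatures come from this listing:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();
const vectorStoreId = 'vs_123'; // placeholder id

// Enqueue a batch of already-uploaded files (parameter name assumed).
let batch = await client.vectorStores.fileBatches.create(vectorStoreId, {
  file_ids: ['file-abc123', 'file-def456'],
});

// Poll until the batch settles (status values assumed).
while (batch.status === 'in_progress') {
  await new Promise((resolve) => setTimeout(resolve, 1_000));
  batch = await client.vectorStores.fileBatches.retrieve(vectorStoreId, batch.id);
}

// listFiles returns the same cursor page type as files.list, so it auto-paginates.
for await (const file of client.vectorStores.fileBatches.listFiles(vectorStoreId, batch.id)) {
  console.log(file.id);
}
```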
# Models
@@ -356,41 +270,9 @@ Methods:
## OpenAI
-Types:
-
-- OpenAIListResponse
-
-Methods:
-
-- client.models.openai.list() -> OpenAIListResponse
-
-# PostTraining
-
-Types:
-
-- AlgorithmConfig
-- ListPostTrainingJobsResponse
-- PostTrainingJob
-
-Methods:
-
-- client.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob
-- client.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob
-
-## Job
-
-Types:
-
-- JobListResponse
-- JobArtifactsResponse
-- JobStatusResponse
-
Methods:
-- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data>
-- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
-- client.postTraining.job.cancel({ ...params }) -> void
-- client.postTraining.job.status({ ...params }) -> JobStatusResponse
+- client.models.openai.list() -> ModelListResponse
# Providers
@@ -423,7 +305,7 @@ Types:
Methods:
-- client.moderations.create({ ...params }) -> CreateResponse
+- client.moderations.create({ ...params }) -> CreateResponse
# Safety
@@ -477,14 +359,13 @@ Types:
Methods:
-- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse
-- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
-- client.telemetry.getTrace(traceId) -> Trace
-- client.telemetry.logEvent({ ...params }) -> void
-- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse
-- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
-- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
-- client.telemetry.saveSpansToDataset({ ...params }) -> void
+- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse
+- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
+- client.telemetry.getTrace(traceId) -> Trace
+- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse
+- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
+- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
+- client.telemetry.saveSpansToDataset({ ...params }) -> void
# Scoring
@@ -523,9 +404,9 @@ Types:
Methods:
-- client.benchmarks.retrieve(benchmarkId) -> Benchmark
-- client.benchmarks.list() -> BenchmarkListResponse
-- client.benchmarks.register({ ...params }) -> void
+- client.benchmarks.retrieve(benchmarkId) -> Benchmark
+- client.benchmarks.list() -> BenchmarkListResponse
+- client.benchmarks.register({ ...params }) -> void
# Files
@@ -538,8 +419,130 @@ Types:
Methods:
-- client.files.create({ ...params }) -> File
-- client.files.retrieve(fileId) -> File
-- client.files.list({ ...params }) -> FilesOpenAICursorPage
-- client.files.delete(fileId) -> DeleteFileResponse
-- client.files.content(fileId) -> unknown
+- client.files.create({ ...params }) -> File
+- client.files.retrieve(fileId) -> File
+- client.files.list({ ...params }) -> FilesOpenAICursorPage
+- client.files.delete(fileId) -> DeleteFileResponse
+- client.files.content(fileId) -> unknown
+
+# Alpha
+
+## Inference
+
+Types:
+
+- InferenceRerankResponse
+
+Methods:
+
+- client.alpha.inference.rerank({ ...params }) -> InferenceRerankResponse
+
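The rerank call now lives under `client.alpha.inference`; a hedged sketch, assuming `query`/`items`/`model` parameter names — only the method path is from this listing:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Rank candidate passages against a query (parameter names assumed).
const ranked = await client.alpha.inference.rerank({
  query: 'What is the capital of France?',
  items: ['Paris is the capital of France.', 'Berlin is the capital of Germany.'],
  model: 'my-reranker', // placeholder model id
});
console.log(ranked);
```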
+## PostTraining
+
+Types:
+
+- AlgorithmConfig
+- ListPostTrainingJobsResponse
+- PostTrainingJob
+
+Methods:
+
+- client.alpha.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob
+- client.alpha.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob
+
+### Job
+
+Types:
+
+- JobListResponse
+- JobArtifactsResponse
+- JobStatusResponse
+
+Methods:
+
+- client.alpha.postTraining.job.list() -> JobListResponse
+- client.alpha.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
+- client.alpha.postTraining.job.cancel({ ...params }) -> void
+- client.alpha.postTraining.job.status({ ...params }) -> JobStatusResponse
+
+## Eval
+
+Types:
+
+- BenchmarkConfig
+- EvaluateResponse
+- Job
+
+Methods:
+
+- client.alpha.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
+- client.alpha.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
+- client.alpha.eval.runEval(benchmarkId, { ...params }) -> Job
+- client.alpha.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
+
+### Jobs
+
+Methods:
+
+- client.alpha.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
+- client.alpha.eval.jobs.cancel(benchmarkId, jobId) -> void
+- client.alpha.eval.jobs.status(benchmarkId, jobId) -> Job
+
+## Agents
+
+Types:
+
+- InferenceStep
+- MemoryRetrievalStep
+- ShieldCallStep
+- ToolExecutionStep
+- ToolResponse
+- AgentCreateResponse
+- AgentRetrieveResponse
+- AgentListResponse
+
+Methods:
+
+- client.alpha.agents.create({ ...params }) -> AgentCreateResponse
+- client.alpha.agents.retrieve(agentId) -> AgentRetrieveResponse
+- client.alpha.agents.list({ ...params }) -> AgentListResponse
+- client.alpha.agents.delete(agentId) -> void
+
+### Session
+
+Types:
+
+- Session
+- SessionCreateResponse
+- SessionListResponse
+
+Methods:
+
+- client.alpha.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
+- client.alpha.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
+- client.alpha.agents.session.list(agentId, { ...params }) -> SessionListResponse
+- client.alpha.agents.session.delete(agentId, sessionId) -> void
+
+### Steps
+
+Types:
+
+- StepRetrieveResponse
+
+Methods:
+
+- client.alpha.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse
+
+### Turn
+
+Types:
+
+- AgentTurnResponseStreamChunk
+- Turn
+- TurnResponseEvent
+
+Methods:
+
+- client.alpha.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
+- client.alpha.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
+- client.alpha.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
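Taken together, the moves above are a mechanical rename for callers — prefix the old resource with `alpha.`. A sketch using only signatures shown in this listing:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Before: client.agents.list() / client.eval.jobs.status(...) / client.postTraining.job.list()
const agents = await client.alpha.agents.list();
const jobStatus = await client.alpha.eval.jobs.status('my-benchmark', 'job-123');
const trainingJobs = await client.alpha.postTraining.job.list();
console.log(agents, jobStatus, trainingJobs);
```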
diff --git a/package.json b/package.json
index 6daf907..aa18833 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "llama-stack-client",
- "version": "0.2.23",
+ "version": "0.3.0-alpha.1",
"description": "The official TypeScript library for the Llama Stack Client API",
"author": "Llama Stack Client ",
"types": "dist/index.d.ts",
diff --git a/release-please-config.json b/release-please-config.json
index 624ed99..1ebd0bd 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -60,8 +60,5 @@
}
],
"release-type": "node",
- "extra-files": [
- "src/version.ts",
- "README.md"
- ]
+ "extra-files": ["src/version.ts", "README.md"]
}
diff --git a/scripts/fast-format b/scripts/fast-format
index 03fb1a3..8a8e9d5 100755
--- a/scripts/fast-format
+++ b/scripts/fast-format
@@ -35,6 +35,6 @@ echo "==> Running prettier --write"
PRETTIER_FILES="$(grep '\.\(js\|json\)$' "$FILE_LIST" || true)"
if ! [ -z "$PRETTIER_FILES" ]; then
echo "$PRETTIER_FILES" | xargs ./node_modules/.bin/prettier \
- --write --cache --cache-strategy metadata \
+ --write --cache --cache-strategy metadata --no-error-on-unmatched-pattern \
'!**/dist' '!**/*.ts' '!**/*.mts' '!**/*.cts' '!**/*.js' '!**/*.mjs' '!**/*.cjs'
fi
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 211e0b9..831e716 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -12,9 +12,11 @@ if [[ "$SIGNED_URL" == "null" ]]; then
exit 1
fi
-UPLOAD_RESPONSE=$(tar "${BASE_PATH:+-C$BASE_PATH}" -cz "${ARTIFACT_PATH:-dist}" | curl -v -X PUT \
+TARBALL=$(cd dist && npm pack --silent)
+
+UPLOAD_RESPONSE=$(curl -v -X PUT \
-H "Content-Type: application/gzip" \
- --data-binary @- "$SIGNED_URL" 2>&1)
+ --data-binary "@dist/$TARBALL" "$SIGNED_URL" 2>&1)
if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
diff --git a/src/index.ts b/src/index.ts
index 68d219d..bdc75ae 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -49,25 +49,6 @@ import {
FilesOpenAICursorPage,
ListFilesResponse,
} from './resources/files';
-import {
- ChatCompletionResponseStreamChunk,
- CompletionResponse,
- EmbeddingsResponse,
- Inference,
- InferenceBatchChatCompletionParams,
- InferenceBatchChatCompletionResponse,
- InferenceBatchCompletionParams,
- InferenceChatCompletionParams,
- InferenceChatCompletionParamsNonStreaming,
- InferenceChatCompletionParamsStreaming,
- InferenceCompletionParams,
- InferenceCompletionParamsNonStreaming,
- InferenceCompletionParamsStreaming,
- InferenceEmbeddingsParams,
- InferenceRerankParams,
- InferenceRerankResponse,
- TokenLogProbs,
-} from './resources/inference';
import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect';
import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations';
import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers';
@@ -109,7 +90,6 @@ import {
TelemetryGetSpanResponse,
TelemetryGetSpanTreeParams,
TelemetryGetSpanTreeResponse,
- TelemetryLogEventParams,
TelemetryQueryMetricsParams,
TelemetryQueryMetricsResponse,
TelemetryQuerySpansParams,
@@ -126,46 +106,22 @@ import {
ToolgroupRegisterParams,
Toolgroups,
} from './resources/toolgroups';
-import { ListToolsResponse, Tool, ToolListParams, ToolListResponse, Tools } from './resources/tools';
-import {
- ListVectorDBsResponse,
- VectorDBListResponse,
- VectorDBRegisterParams,
- VectorDBRegisterResponse,
- VectorDBRetrieveResponse,
- VectorDBs,
-} from './resources/vector-dbs';
+import { ToolListParams, ToolListResponse, Tools } from './resources/tools';
import {
QueryChunksResponse,
VectorIo,
VectorIoInsertParams,
VectorIoQueryParams,
} from './resources/vector-io';
-import {
- AgentCreateParams,
- AgentCreateResponse,
- AgentListParams,
- AgentListResponse,
- AgentRetrieveResponse,
- Agents,
- InferenceStep,
- MemoryRetrievalStep,
- ShieldCallStep,
- ToolExecutionStep,
- ToolResponse,
-} from './resources/agents/agents';
+import { Alpha } from './resources/alpha/alpha';
import { Chat, ChatCompletionChunk } from './resources/chat/chat';
import {
- BenchmarkConfig,
- Eval,
- EvalCandidate,
- EvalEvaluateRowsAlphaParams,
- EvalEvaluateRowsParams,
- EvalRunEvalAlphaParams,
- EvalRunEvalParams,
- EvaluateResponse,
- Job,
-} from './resources/eval/eval';
+ ConversationCreateParams,
+ ConversationDeleteResponse,
+ ConversationObject,
+ ConversationUpdateParams,
+ Conversations,
+} from './resources/conversations/conversations';
import {
ListModelsResponse,
Model,
@@ -173,14 +129,6 @@ import {
ModelRegisterParams,
Models,
} from './resources/models/models';
-import {
- AlgorithmConfig,
- ListPostTrainingJobsResponse,
- PostTraining,
- PostTrainingJob,
- PostTrainingPreferenceOptimizeParams,
- PostTrainingSupervisedFineTuneParams,
-} from './resources/post-training/post-training';
import {
ResponseCreateParams,
ResponseCreateParamsNonStreaming,
@@ -328,19 +276,15 @@ export class LlamaStackClient extends Core.APIClient {
tools: API.Tools = new API.Tools(this);
toolRuntime: API.ToolRuntime = new API.ToolRuntime(this);
responses: API.Responses = new API.Responses(this);
- agents: API.Agents = new API.Agents(this);
+ conversations: API.Conversations = new API.Conversations(this);
datasets: API.Datasets = new API.Datasets(this);
- eval: API.Eval = new API.Eval(this);
inspect: API.Inspect = new API.Inspect(this);
- inference: API.Inference = new API.Inference(this);
embeddings: API.Embeddings = new API.Embeddings(this);
chat: API.Chat = new API.Chat(this);
completions: API.Completions = new API.Completions(this);
vectorIo: API.VectorIo = new API.VectorIo(this);
- vectorDBs: API.VectorDBs = new API.VectorDBs(this);
vectorStores: API.VectorStores = new API.VectorStores(this);
models: API.Models = new API.Models(this);
- postTraining: API.PostTraining = new API.PostTraining(this);
providers: API.Providers = new API.Providers(this);
routes: API.Routes = new API.Routes(this);
moderations: API.Moderations = new API.Moderations(this);
@@ -352,6 +296,7 @@ export class LlamaStackClient extends Core.APIClient {
scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this);
benchmarks: API.Benchmarks = new API.Benchmarks(this);
files: API.Files = new API.Files(this);
+ alpha: API.Alpha = new API.Alpha(this);
/**
* Check whether the base URL is set to its default.
@@ -408,20 +353,16 @@ LlamaStackClient.Tools = Tools;
LlamaStackClient.ToolRuntime = ToolRuntime;
LlamaStackClient.Responses = Responses;
LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage;
-LlamaStackClient.Agents = Agents;
+LlamaStackClient.Conversations = Conversations;
LlamaStackClient.Datasets = Datasets;
-LlamaStackClient.Eval = Eval;
LlamaStackClient.Inspect = Inspect;
-LlamaStackClient.Inference = Inference;
LlamaStackClient.Embeddings = Embeddings;
LlamaStackClient.Chat = Chat;
LlamaStackClient.Completions = Completions;
LlamaStackClient.VectorIo = VectorIo;
-LlamaStackClient.VectorDBs = VectorDBs;
LlamaStackClient.VectorStores = VectorStores;
LlamaStackClient.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage;
LlamaStackClient.Models = Models;
-LlamaStackClient.PostTraining = PostTraining;
LlamaStackClient.Providers = Providers;
LlamaStackClient.Routes = Routes;
LlamaStackClient.Moderations = Moderations;
@@ -434,6 +375,7 @@ LlamaStackClient.ScoringFunctions = ScoringFunctions;
LlamaStackClient.Benchmarks = Benchmarks;
LlamaStackClient.Files = Files;
LlamaStackClient.FilesOpenAICursorPage = FilesOpenAICursorPage;
+LlamaStackClient.Alpha = Alpha;
export declare namespace LlamaStackClient {
export type RequestOptions = Core.RequestOptions;
@@ -458,13 +400,7 @@ export declare namespace LlamaStackClient {
type ToolgroupRegisterParams as ToolgroupRegisterParams,
};
- export {
- Tools as Tools,
- type ListToolsResponse as ListToolsResponse,
- type Tool as Tool,
- type ToolListResponse as ToolListResponse,
- type ToolListParams as ToolListParams,
- };
+ export { Tools as Tools, type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams };
export {
ToolRuntime as ToolRuntime,
@@ -489,17 +425,11 @@ export declare namespace LlamaStackClient {
};
export {
- Agents as Agents,
- type InferenceStep as InferenceStep,
- type MemoryRetrievalStep as MemoryRetrievalStep,
- type ShieldCallStep as ShieldCallStep,
- type ToolExecutionStep as ToolExecutionStep,
- type ToolResponse as ToolResponse,
- type AgentCreateResponse as AgentCreateResponse,
- type AgentRetrieveResponse as AgentRetrieveResponse,
- type AgentListResponse as AgentListResponse,
- type AgentCreateParams as AgentCreateParams,
- type AgentListParams as AgentListParams,
+ Conversations as Conversations,
+ type ConversationObject as ConversationObject,
+ type ConversationDeleteResponse as ConversationDeleteResponse,
+ type ConversationCreateParams as ConversationCreateParams,
+ type ConversationUpdateParams as ConversationUpdateParams,
};
export {
@@ -514,18 +444,6 @@ export declare namespace LlamaStackClient {
type DatasetRegisterParams as DatasetRegisterParams,
};
- export {
- Eval as Eval,
- type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
- type EvaluateResponse as EvaluateResponse,
- type Job as Job,
- type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams as EvalRunEvalParams,
- type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
- };
-
export {
Inspect as Inspect,
type HealthInfo as HealthInfo,
@@ -534,26 +452,6 @@ export declare namespace LlamaStackClient {
type VersionInfo as VersionInfo,
};
- export {
- Inference as Inference,
- type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
- type CompletionResponse as CompletionResponse,
- type EmbeddingsResponse as EmbeddingsResponse,
- type TokenLogProbs as TokenLogProbs,
- type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse as InferenceRerankResponse,
- type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
- type InferenceChatCompletionParams as InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams as InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
- type InferenceRerankParams as InferenceRerankParams,
- };
-
export {
Embeddings as Embeddings,
type CreateEmbeddingsResponse as CreateEmbeddingsResponse,
@@ -577,15 +475,6 @@ export declare namespace LlamaStackClient {
type VectorIoQueryParams as VectorIoQueryParams,
};
- export {
- VectorDBs as VectorDBs,
- type ListVectorDBsResponse as ListVectorDBsResponse,
- type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
- type VectorDBListResponse as VectorDBListResponse,
- type VectorDBRegisterResponse as VectorDBRegisterResponse,
- type VectorDBRegisterParams as VectorDBRegisterParams,
- };
-
export {
VectorStores as VectorStores,
type ListVectorStoresResponse as ListVectorStoresResponse,
@@ -607,15 +496,6 @@ export declare namespace LlamaStackClient {
type ModelRegisterParams as ModelRegisterParams,
};
- export {
- PostTraining as PostTraining,
- type AlgorithmConfig as AlgorithmConfig,
- type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
- type PostTrainingJob as PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
- };
-
export {
Providers as Providers,
type ListProvidersResponse as ListProvidersResponse,
@@ -667,7 +547,6 @@ export declare namespace LlamaStackClient {
type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse,
type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse,
type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams as TelemetryLogEventParams,
type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams,
type TelemetryQuerySpansParams as TelemetryQuerySpansParams,
type TelemetryQueryTracesParams as TelemetryQueryTracesParams,
@@ -710,19 +589,16 @@ export declare namespace LlamaStackClient {
type FileListParams as FileListParams,
};
+ export { Alpha as Alpha };
+
export type AgentConfig = API.AgentConfig;
- export type BatchCompletion = API.BatchCompletion;
- export type ChatCompletionResponse = API.ChatCompletionResponse;
export type CompletionMessage = API.CompletionMessage;
- export type ContentDelta = API.ContentDelta;
export type Document = API.Document;
export type InterleavedContent = API.InterleavedContent;
export type InterleavedContentItem = API.InterleavedContentItem;
export type Message = API.Message;
- export type Metric = API.Metric;
export type ParamType = API.ParamType;
export type QueryConfig = API.QueryConfig;
- export type QueryGeneratorConfig = API.QueryGeneratorConfig;
export type QueryResult = API.QueryResult;
export type ResponseFormat = API.ResponseFormat;
export type SafetyViolation = API.SafetyViolation;
@@ -730,7 +606,6 @@ export declare namespace LlamaStackClient {
export type ScoringResult = API.ScoringResult;
export type SystemMessage = API.SystemMessage;
export type ToolCall = API.ToolCall;
- export type ToolParamDefinition = API.ToolParamDefinition;
export type ToolResponseMessage = API.ToolResponseMessage;
export type UserMessage = API.UserMessage;
}
diff --git a/src/resources/alpha.ts b/src/resources/alpha.ts
new file mode 100644
index 0000000..446b643
--- /dev/null
+++ b/src/resources/alpha.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './alpha/index';
diff --git a/src/resources/agents.ts b/src/resources/alpha/agents.ts
similarity index 100%
rename from src/resources/agents.ts
rename to src/resources/alpha/agents.ts
diff --git a/src/resources/agents/agents.ts b/src/resources/alpha/agents/agents.ts
similarity index 93%
rename from src/resources/agents/agents.ts
rename to src/resources/alpha/agents/agents.ts
index 35a4d62..6c6a147 100644
--- a/src/resources/agents/agents.ts
+++ b/src/resources/alpha/agents/agents.ts
@@ -1,9 +1,9 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
-import * as Shared from '../shared';
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
+import * as Shared from '../../shared';
import * as SessionAPI from './session';
import {
Session,
@@ -25,7 +25,6 @@ import {
TurnCreateParamsStreaming,
TurnResource,
TurnResponseEvent,
- TurnResponseEventPayload,
TurnResumeParams,
TurnResumeParamsNonStreaming,
TurnResumeParamsStreaming,
@@ -40,14 +39,14 @@ export class Agents extends APIResource {
* Create an agent with the given configuration.
*/
create(body: AgentCreateParams, options?: Core.RequestOptions): Core.APIPromise<AgentCreateResponse> {
- return this._client.post('/v1/agents', { body, ...options });
+ return this._client.post('/v1alpha/agents', { body, ...options });
}
/**
* Describe an agent by its ID.
*/
retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise<AgentRetrieveResponse> {
- return this._client.get(`/v1/agents/${agentId}`, options);
+ return this._client.get(`/v1alpha/agents/${agentId}`, options);
}
/**
@@ -62,14 +61,14 @@ export class Agents extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.get('/v1/agents', { query, ...options });
+ return this._client.get('/v1alpha/agents', { query, ...options });
}
/**
* Delete an agent by its ID and its associated sessions and turns.
*/
delete(agentId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/agents/${agentId}`, {
+ return this._client.delete(`/v1alpha/agents/${agentId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
@@ -355,7 +354,6 @@ export declare namespace Agents {
type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
type Turn as Turn,
type TurnResponseEvent as TurnResponseEvent,
- type TurnResponseEventPayload as TurnResponseEventPayload,
type TurnCreateParams as TurnCreateParams,
type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
diff --git a/src/resources/agents/index.ts b/src/resources/alpha/agents/index.ts
similarity index 96%
rename from src/resources/agents/index.ts
rename to src/resources/alpha/agents/index.ts
index 88a44bf..05f354f 100644
--- a/src/resources/agents/index.ts
+++ b/src/resources/alpha/agents/index.ts
@@ -28,7 +28,6 @@ export {
type AgentTurnResponseStreamChunk,
type Turn,
type TurnResponseEvent,
- type TurnResponseEventPayload,
type TurnCreateParams,
type TurnCreateParamsNonStreaming,
type TurnCreateParamsStreaming,
diff --git a/src/resources/agents/session.ts b/src/resources/alpha/agents/session.ts
similarity index 87%
rename from src/resources/agents/session.ts
rename to src/resources/alpha/agents/session.ts
index 35c8511..53fa1c8 100644
--- a/src/resources/agents/session.ts
+++ b/src/resources/alpha/agents/session.ts
@@ -1,8 +1,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
import * as TurnAPI from './turn';
export class SessionResource extends APIResource {
@@ -14,7 +14,7 @@ export class SessionResource extends APIResource {
body: SessionCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<SessionCreateResponse> {
- return this._client.post(`/v1/agents/${agentId}/session`, { body, ...options });
+ return this._client.post(`/v1alpha/agents/${agentId}/session`, { body, ...options });
}
/**
@@ -36,7 +36,7 @@ export class SessionResource extends APIResource {
if (isRequestOptions(query)) {
return this.retrieve(agentId, sessionId, {}, query);
}
- return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options });
+ return this._client.get(`/v1alpha/agents/${agentId}/session/${sessionId}`, { query, ...options });
}
/**
@@ -56,14 +56,14 @@ export class SessionResource extends APIResource {
if (isRequestOptions(query)) {
return this.list(agentId, {}, query);
}
- return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options });
+ return this._client.get(`/v1alpha/agents/${agentId}/sessions`, { query, ...options });
}
/**
* Delete an agent session by its ID and its associated turns.
*/
delete(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/agents/${agentId}/session/${sessionId}`, {
+ return this._client.delete(`/v1alpha/agents/${agentId}/session/${sessionId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
diff --git a/src/resources/agents/steps.ts b/src/resources/alpha/agents/steps.ts
similarity index 83%
rename from src/resources/agents/steps.ts
rename to src/resources/alpha/agents/steps.ts
index 8d2d821..76c48c3 100644
--- a/src/resources/agents/steps.ts
+++ b/src/resources/alpha/agents/steps.ts
@@ -1,7 +1,7 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
import * as AgentsAPI from './agents';
export class Steps extends APIResource {
@@ -16,7 +16,7 @@ export class Steps extends APIResource {
options?: Core.RequestOptions,
): Core.APIPromise<StepRetrieveResponse> {
return this._client.get(
- `/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`,
+ `/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`,
options,
);
}
diff --git a/src/resources/agents/turn.ts b/src/resources/alpha/agents/turn.ts
similarity index 87%
rename from src/resources/agents/turn.ts
rename to src/resources/alpha/agents/turn.ts
index 0273625..33c4065 100644
--- a/src/resources/agents/turn.ts
+++ b/src/resources/alpha/agents/turn.ts
@@ -1,12 +1,12 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import { APIPromise } from '../../core';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import { APIPromise } from '../../../core';
+import * as Core from '../../../core';
import * as TurnAPI from './turn';
-import * as Shared from '../shared';
+import * as Shared from '../../shared';
import * as AgentsAPI from './agents';
-import { Stream } from '../../streaming';
+import { Stream } from '../../../streaming';
export class TurnResource extends APIResource {
/**
@@ -36,7 +36,7 @@ export class TurnResource extends APIResource {
body: TurnCreateParams,
options?: Core.RequestOptions,
): APIPromise<Turn> | APIPromise<Stream<AgentTurnResponseStreamChunk>> {
- return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, {
+ return this._client.post(`/v1alpha/agents/${agentId}/session/${sessionId}/turn`, {
body,
...options,
stream: body.stream ?? false,
@@ -52,7 +52,7 @@ export class TurnResource extends APIResource {
turnId: string,
options?: Core.RequestOptions,
): Core.APIPromise<Turn> {
- return this._client.get(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options);
+ return this._client.get(`/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options);
}
/**
@@ -89,7 +89,7 @@ export class TurnResource extends APIResource {
body: TurnResumeParams,
options?: Core.RequestOptions,
): APIPromise<Turn> | APIPromise<Stream<AgentTurnResponseStreamChunk>> {
- return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, {
+ return this._client.post(`/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, {
body,
...options,
stream: body.stream ?? false,
@@ -259,21 +259,16 @@ export interface TurnResponseEvent {
/**
* Event-specific payload containing event data
*/
- payload: TurnResponseEventPayload;
+ payload:
+ | TurnResponseEvent.AgentTurnResponseStepStartPayload
+ | TurnResponseEvent.AgentTurnResponseStepProgressPayload
+ | TurnResponseEvent.AgentTurnResponseStepCompletePayload
+ | TurnResponseEvent.AgentTurnResponseTurnStartPayload
+ | TurnResponseEvent.AgentTurnResponseTurnCompletePayload
+ | TurnResponseEvent.AgentTurnResponseTurnAwaitingInputPayload;
}
-/**
- * Payload for step start events in agent turn responses.
- */
-export type TurnResponseEventPayload =
- | TurnResponseEventPayload.AgentTurnResponseStepStartPayload
- | TurnResponseEventPayload.AgentTurnResponseStepProgressPayload
- | TurnResponseEventPayload.AgentTurnResponseStepCompletePayload
- | TurnResponseEventPayload.AgentTurnResponseTurnStartPayload
- | TurnResponseEventPayload.AgentTurnResponseTurnCompletePayload
- | TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload;
-
-export namespace TurnResponseEventPayload {
+export namespace TurnResponseEvent {
/**
* Payload for step start events in agent turn responses.
*/
@@ -306,7 +301,10 @@ export namespace TurnResponseEventPayload {
/**
* Incremental content changes during step execution
*/
- delta: Shared.ContentDelta;
+ delta:
+ | AgentTurnResponseStepProgressPayload.TextDelta
+ | AgentTurnResponseStepProgressPayload.ImageDelta
+ | AgentTurnResponseStepProgressPayload.ToolCallDelta;
/**
* Type of event being reported
@@ -324,6 +322,58 @@ export namespace TurnResponseEventPayload {
step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
}
+ export namespace AgentTurnResponseStepProgressPayload {
+ /**
+ * A text content delta for streaming responses.
+ */
+ export interface TextDelta {
+ /**
+ * The incremental text content
+ */
+ text: string;
+
+ /**
+ * Discriminator type of the delta. Always "text"
+ */
+ type: 'text';
+ }
+
+ /**
+ * An image content delta for streaming responses.
+ */
+ export interface ImageDelta {
+ /**
+ * The incremental image data as bytes
+ */
+ image: string;
+
+ /**
+ * Discriminator type of the delta. Always "image"
+ */
+ type: 'image';
+ }
+
+ /**
+ * A tool call content delta for streaming responses.
+ */
+ export interface ToolCallDelta {
+ /**
+ * Current parsing status of the tool call
+ */
+ parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded';
+
+ /**
+ * Either an in-progress tool call string or the final parsed tool call
+ */
+ tool_call: string | Shared.ToolCall;
+
+ /**
+ * Discriminator type of the delta. Always "tool_call"
+ */
+ type: 'tool_call';
+ }
+ }
+
/**
* Payload for step completion events in agent turn responses.
*/
@@ -621,7 +671,6 @@ export declare namespace TurnResource {
type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
type Turn as Turn,
type TurnResponseEvent as TurnResponseEvent,
- type TurnResponseEventPayload as TurnResponseEventPayload,
type TurnCreateParams as TurnCreateParams,
type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
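A hedged sketch of consuming a streaming turn against the relocated `/v1alpha` endpoints, narrowing the delta union that this file now inlines; the `messages` request shape and placeholder ids are assumptions:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();
const agentId = 'agent-123'; // placeholder ids
const sessionId = 'session-456';

const stream = await client.alpha.agents.turn.create(agentId, sessionId, {
  messages: [{ content: 'Hello!', role: 'user' }],
  stream: true,
});

for await (const chunk of stream) {
  const payload = chunk.event.payload;
  // Only step-progress payloads carry a delta; narrow structurally, then by discriminator.
  if ('delta' in payload && payload.delta.type === 'text') {
    process.stdout.write(payload.delta.text);
  }
}
```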
diff --git a/src/resources/alpha/alpha.ts b/src/resources/alpha/alpha.ts
new file mode 100644
index 0000000..3565610
--- /dev/null
+++ b/src/resources/alpha/alpha.ts
@@ -0,0 +1,93 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as InferenceAPI from './inference';
+import { Inference, InferenceRerankParams, InferenceRerankResponse } from './inference';
+import * as AgentsAPI from './agents/agents';
+import {
+ AgentCreateParams,
+ AgentCreateResponse,
+ AgentListParams,
+ AgentListResponse,
+ AgentRetrieveResponse,
+ Agents,
+ InferenceStep,
+ MemoryRetrievalStep,
+ ShieldCallStep,
+ ToolExecutionStep,
+ ToolResponse,
+} from './agents/agents';
+import * as EvalAPI from './eval/eval';
+import {
+ BenchmarkConfig,
+ Eval,
+ EvalEvaluateRowsAlphaParams,
+ EvalEvaluateRowsParams,
+ EvalRunEvalAlphaParams,
+ EvalRunEvalParams,
+ EvaluateResponse,
+ Job,
+} from './eval/eval';
+import * as PostTrainingAPI from './post-training/post-training';
+import {
+ AlgorithmConfig,
+ ListPostTrainingJobsResponse,
+ PostTraining,
+ PostTrainingJob,
+ PostTrainingPreferenceOptimizeParams,
+ PostTrainingSupervisedFineTuneParams,
+} from './post-training/post-training';
+
+export class Alpha extends APIResource {
+ inference: InferenceAPI.Inference = new InferenceAPI.Inference(this._client);
+ postTraining: PostTrainingAPI.PostTraining = new PostTrainingAPI.PostTraining(this._client);
+ eval: EvalAPI.Eval = new EvalAPI.Eval(this._client);
+ agents: AgentsAPI.Agents = new AgentsAPI.Agents(this._client);
+}
+
+Alpha.Inference = Inference;
+Alpha.PostTraining = PostTraining;
+Alpha.Eval = Eval;
+Alpha.Agents = Agents;
+
+export declare namespace Alpha {
+ export {
+ Inference as Inference,
+ type InferenceRerankResponse as InferenceRerankResponse,
+ type InferenceRerankParams as InferenceRerankParams,
+ };
+
+ export {
+ PostTraining as PostTraining,
+ type AlgorithmConfig as AlgorithmConfig,
+ type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
+ type PostTrainingJob as PostTrainingJob,
+ type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
+ type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
+ };
+
+ export {
+ Eval as Eval,
+ type BenchmarkConfig as BenchmarkConfig,
+ type EvaluateResponse as EvaluateResponse,
+ type Job as Job,
+ type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
+ type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
+ type EvalRunEvalParams as EvalRunEvalParams,
+ type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
+ };
+
+ export {
+ Agents as Agents,
+ type InferenceStep as InferenceStep,
+ type MemoryRetrievalStep as MemoryRetrievalStep,
+ type ShieldCallStep as ShieldCallStep,
+ type ToolExecutionStep as ToolExecutionStep,
+ type ToolResponse as ToolResponse,
+ type AgentCreateResponse as AgentCreateResponse,
+ type AgentRetrieveResponse as AgentRetrieveResponse,
+ type AgentListResponse as AgentListResponse,
+ type AgentCreateParams as AgentCreateParams,
+ type AgentListParams as AgentListParams,
+ };
+}
diff --git a/src/resources/eval.ts b/src/resources/alpha/eval.ts
similarity index 100%
rename from src/resources/eval.ts
rename to src/resources/alpha/eval.ts
diff --git a/src/resources/eval/eval.ts b/src/resources/alpha/eval/eval.ts
similarity index 83%
rename from src/resources/eval/eval.ts
rename to src/resources/alpha/eval/eval.ts
index 961b24e..97f0cfb 100644
--- a/src/resources/eval/eval.ts
+++ b/src/resources/alpha/eval/eval.ts
@@ -1,9 +1,9 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as ScoringFunctionsAPI from '../scoring-functions';
-import * as Shared from '../shared';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
+import * as ScoringFunctionsAPI from '../../scoring-functions';
+import * as Shared from '../../shared';
import * as JobsAPI from './jobs';
import { Jobs } from './jobs';
@@ -18,7 +18,7 @@ export class Eval extends APIResource {
body: EvalEvaluateRowsParams,
options?: Core.RequestOptions,
): Core.APIPromise<EvaluateResponse> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
}
/**
@@ -29,14 +29,14 @@ export class Eval extends APIResource {
body: EvalEvaluateRowsAlphaParams,
options?: Core.RequestOptions,
): Core.APIPromise<EvaluateResponse> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
}
/**
* Run an evaluation on a benchmark.
*/
runEval(benchmarkId: string, body: EvalRunEvalParams, options?: Core.RequestOptions): Core.APIPromise<Job> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
}
/**
@@ -47,7 +47,7 @@ export class Eval extends APIResource {
body: EvalRunEvalAlphaParams,
options?: Core.RequestOptions,
): Core.APIPromise<Job> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
}
}
@@ -58,7 +58,7 @@ export interface BenchmarkConfig {
/**
* The candidate to evaluate.
*/
- eval_candidate: EvalCandidate;
+ eval_candidate: BenchmarkConfig.ModelCandidate | BenchmarkConfig.AgentCandidate;
/**
* Map between scoring function id and parameters for each scoring function you
@@ -73,12 +73,7 @@ export interface BenchmarkConfig {
num_examples?: number;
}
-/**
- * A model candidate for evaluation.
- */
-export type EvalCandidate = EvalCandidate.ModelCandidate | EvalCandidate.AgentCandidate;
-
-export namespace EvalCandidate {
+export namespace BenchmarkConfig {
/**
* A model candidate for evaluation.
*/
@@ -197,7 +192,6 @@ Eval.Jobs = Jobs;
export declare namespace Eval {
export {
type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
type EvaluateResponse as EvaluateResponse,
type Job as Job,
type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
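
With `EvalCandidate` removed, the candidate union is inlined on `BenchmarkConfig`. A minimal usage sketch under the new `client.alpha` namespace — the `ModelCandidate` field shape and the `benchmark_config` body key are assumptions, since those interfaces are truncated in this hunk:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Assumes the ModelCandidate branch keeps its { type, model, sampling_params } shape.
const benchmarkConfig = {
  eval_candidate: {
    type: 'model' as const,
    model: 'my-model', // placeholder model id
    sampling_params: { strategy: { type: 'greedy' as const } },
  },
  scoring_params: {}, // map of scoring function id -> parameters
};

// runEval now posts to /v1alpha/eval/benchmarks/{benchmark_id}/jobs.
const job = await client.alpha.eval.runEval('my-benchmark', {
  benchmark_config: benchmarkConfig,
});
```
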
diff --git a/src/resources/eval/index.ts b/src/resources/alpha/eval/index.ts
similarity index 93%
rename from src/resources/eval/index.ts
rename to src/resources/alpha/eval/index.ts
index e8c35f3..ecdb275 100644
--- a/src/resources/eval/index.ts
+++ b/src/resources/alpha/eval/index.ts
@@ -3,7 +3,6 @@
export {
Eval,
type BenchmarkConfig,
- type EvalCandidate,
type EvaluateResponse,
type Job,
type EvalEvaluateRowsParams,
diff --git a/src/resources/eval/jobs.ts b/src/resources/alpha/eval/jobs.ts
similarity index 65%
rename from src/resources/eval/jobs.ts
rename to src/resources/alpha/eval/jobs.ts
index 13d4a4d..3a830bd 100644
--- a/src/resources/eval/jobs.ts
+++ b/src/resources/alpha/eval/jobs.ts
@@ -1,7 +1,7 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
import * as EvalAPI from './eval';
export class Jobs extends APIResource {
@@ -13,14 +13,14 @@ export class Jobs extends APIResource {
jobId: string,
options?: Core.RequestOptions,
): Core.APIPromise<EvalAPI.EvaluateResponse> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options);
+ return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options);
}
/**
* Cancel a job.
*/
cancel(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, {
+ return this._client.delete(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
@@ -30,6 +30,6 @@ export class Jobs extends APIResource {
* Get the status of a job.
*/
status(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<EvalAPI.Job> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options);
+ return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options);
}
}
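
The job endpoints get the same `/v1alpha` treatment. Continuing the sketch above — the first method's name is cut off in this hunk, so `jobs.retrieve` and the `status`/`job_id` fields on `Job` are assumptions:

```ts
// Poll until the run finishes; the status literal values are assumptions.
let jobStatus = await client.alpha.eval.jobs.status('my-benchmark', job.job_id);
while (jobStatus.status === 'in_progress') {
  await new Promise((resolve) => setTimeout(resolve, 2_000));
  jobStatus = await client.alpha.eval.jobs.status('my-benchmark', job.job_id);
}

// Fetch the EvaluateResponse for the completed run.
const result = await client.alpha.eval.jobs.retrieve('my-benchmark', job.job_id);
```
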
diff --git a/src/resources/alpha/index.ts b/src/resources/alpha/index.ts
new file mode 100644
index 0000000..082839d
--- /dev/null
+++ b/src/resources/alpha/index.ts
@@ -0,0 +1,35 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Agents,
+ type InferenceStep,
+ type MemoryRetrievalStep,
+ type ShieldCallStep,
+ type ToolExecutionStep,
+ type ToolResponse,
+ type AgentCreateResponse,
+ type AgentRetrieveResponse,
+ type AgentListResponse,
+ type AgentCreateParams,
+ type AgentListParams,
+} from './agents/index';
+export { Alpha } from './alpha';
+export {
+ Eval,
+ type BenchmarkConfig,
+ type EvaluateResponse,
+ type Job,
+ type EvalEvaluateRowsParams,
+ type EvalEvaluateRowsAlphaParams,
+ type EvalRunEvalParams,
+ type EvalRunEvalAlphaParams,
+} from './eval/index';
+export { Inference, type InferenceRerankResponse, type InferenceRerankParams } from './inference';
+export {
+ PostTraining,
+ type AlgorithmConfig,
+ type ListPostTrainingJobsResponse,
+ type PostTrainingJob,
+ type PostTrainingPreferenceOptimizeParams,
+ type PostTrainingSupervisedFineTuneParams,
+} from './post-training/index';
diff --git a/src/resources/alpha/inference.ts b/src/resources/alpha/inference.ts
new file mode 100644
index 0000000..ca6db21
--- /dev/null
+++ b/src/resources/alpha/inference.ts
@@ -0,0 +1,178 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+
+export class Inference extends APIResource {
+ /**
+ * Rerank a list of documents based on their relevance to a query.
+ */
+ rerank(
+ body: InferenceRerankParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<InferenceRerankResponse> {
+ return (
+ this._client.post('/v1alpha/inference/rerank', { body, ...options }) as Core.APIPromise<{
+ data: InferenceRerankResponse;
+ }>
+ )._thenUnwrap((obj) => obj.data);
+ }
+}
+
+/**
+ * List of rerank result objects, sorted by relevance score (descending)
+ */
+export type InferenceRerankResponse = Array<InferenceRerankResponse.InferenceRerankResponseItem>;
+
+export namespace InferenceRerankResponse {
+ /**
+ * A single rerank result from a reranking response.
+ */
+ export interface InferenceRerankResponseItem {
+ /**
+ * The original index of the document in the input list
+ */
+ index: number;
+
+ /**
+ * The relevance score from the model output. Values are inverted when applicable
+ * so that higher scores indicate greater relevance.
+ */
+ relevance_score: number;
+ }
+}
+
+export interface InferenceRerankParams {
+ /**
+ * List of items to rerank. Each item can be a string, text content part, or image
+ * content part. Each input must not exceed the model's max input token length.
+ */
+ items: Array<
+ | string
+ | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
+ | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam
+ >;
+
+ /**
+ * The identifier of the reranking model to use.
+ */
+ model: string;
+
+ /**
+ * The search query to rank items against. Can be a string, text content part, or
+ * image content part. The input must not exceed the model's max input token
+ * length.
+ */
+ query:
+ | string
+ | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
+ | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam;
+
+ /**
+ * (Optional) Maximum number of results to return. Default: returns all.
+ */
+ max_num_results?: number;
+}
+
+export namespace InferenceRerankParams {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+ /**
+ * Must be "image_url" to identify this as image content
+ */
+ type: 'image_url';
+ }
+
+ export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
+ url: string;
+
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
+ detail?: string;
+ }
+ }
+
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+ /**
+ * Must be "image_url" to identify this as image content
+ */
+ type: 'image_url';
+ }
+
+ export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
+ url: string;
+
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
+ detail?: string;
+ }
+ }
+}
+
+export declare namespace Inference {
+ export {
+ type InferenceRerankResponse as InferenceRerankResponse,
+ type InferenceRerankParams as InferenceRerankParams,
+ };
+}
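
Because `rerank` unwraps the `{ data }` envelope via `_thenUnwrap`, the promise resolves directly to the result array. A sketch grounded in the types above (model id and documents are placeholders):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

const results = await client.alpha.inference.rerank({
  model: 'my-reranker', // placeholder reranking model id
  query: 'What is the capital of France?',
  items: ['Paris is the capital of France.', 'Berlin is the capital of Germany.'],
  max_num_results: 1,
});

// Items arrive sorted by relevance_score, descending.
for (const result of results) {
  console.log(result.index, result.relevance_score);
}
```
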
diff --git a/src/resources/post-training.ts b/src/resources/alpha/post-training.ts
similarity index 100%
rename from src/resources/post-training.ts
rename to src/resources/alpha/post-training.ts
diff --git a/src/resources/post-training/index.ts b/src/resources/alpha/post-training/index.ts
similarity index 100%
rename from src/resources/post-training/index.ts
rename to src/resources/alpha/post-training/index.ts
diff --git a/src/resources/post-training/job.ts b/src/resources/alpha/post-training/job.ts
similarity index 89%
rename from src/resources/post-training/job.ts
rename to src/resources/alpha/post-training/job.ts
index a250ac9..ba3de54 100644
--- a/src/resources/post-training/job.ts
+++ b/src/resources/alpha/post-training/job.ts
@@ -1,20 +1,15 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as PostTrainingAPI from './post-training';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
export class Job extends APIResource {
/**
* Get all training jobs.
*/
- list(
- options?: Core.RequestOptions,
- ): Core.APIPromise<Array<PostTrainingAPI.ListPostTrainingJobsResponse.Data>> {
+ list(options?: Core.RequestOptions): Core.APIPromise<JobListResponse> {
return (
- this._client.get('/v1/post-training/jobs', options) as Core.APIPromise<{
- data: Array<PostTrainingAPI.ListPostTrainingJobsResponse.Data>;
- }>
+ this._client.get('/v1alpha/post-training/jobs', options) as Core.APIPromise<{ data: JobListResponse }>
)._thenUnwrap((obj) => obj.data);
}
@@ -22,14 +17,14 @@ export class Job extends APIResource {
* Get the artifacts of a training job.
*/
artifacts(query: JobArtifactsParams, options?: Core.RequestOptions): Core.APIPromise<JobArtifactsResponse> {
- return this._client.get('/v1/post-training/job/artifacts', { query, ...options });
+ return this._client.get('/v1alpha/post-training/job/artifacts', { query, ...options });
}
/**
* Cancel a training job.
*/
cancel(body: JobCancelParams, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.post('/v1/post-training/job/cancel', {
+ return this._client.post('/v1alpha/post-training/job/cancel', {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
@@ -40,7 +35,7 @@ export class Job extends APIResource {
* Get the status of a training job.
*/
status(query: JobStatusParams, options?: Core.RequestOptions): Core.APIPromise<JobStatusResponse> {
- return this._client.get('/v1/post-training/job/status', { query, ...options });
+ return this._client.get('/v1alpha/post-training/job/status', { query, ...options });
}
}
diff --git a/src/resources/post-training/post-training.ts b/src/resources/alpha/post-training/post-training.ts
similarity index 96%
rename from src/resources/post-training/post-training.ts
rename to src/resources/alpha/post-training/post-training.ts
index 8f6eb3f..06edcfc 100644
--- a/src/resources/post-training/post-training.ts
+++ b/src/resources/alpha/post-training/post-training.ts
@@ -1,7 +1,7 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
import * as JobAPI from './job';
import {
Job,
@@ -23,7 +23,7 @@ export class PostTraining extends APIResource {
body: PostTrainingPreferenceOptimizeParams,
options?: Core.RequestOptions,
): Core.APIPromise<PostTrainingJob> {
- return this._client.post('/v1/post-training/preference-optimize', { body, ...options });
+ return this._client.post('/v1alpha/post-training/preference-optimize', { body, ...options });
}
/**
@@ -33,7 +33,7 @@ export class PostTraining extends APIResource {
body: PostTrainingSupervisedFineTuneParams,
options?: Core.RequestOptions,
): Core.APIPromise<PostTrainingJob> {
- return this._client.post('/v1/post-training/supervised-fine-tune', { body, ...options });
+ return this._client.post('/v1alpha/post-training/supervised-fine-tune', { body, ...options });
}
}
@@ -110,13 +110,7 @@ export namespace AlgorithmConfig {
}
export interface ListPostTrainingJobsResponse {
- data: Array<ListPostTrainingJobsResponse.Data>;
-}
-
-export namespace ListPostTrainingJobsResponse {
- export interface Data {
- job_uuid: string;
- }
+ data: JobAPI.JobListResponse;
}
export interface PostTrainingJob {
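
With `list` now unwrapping to the shared `JobListResponse` instead of a locally-declared `Data` array, callers consume one type for both the list endpoint and `ListPostTrainingJobsResponse.data`. A sketch, assuming the resource is exposed as `client.alpha.postTraining.job`:

```ts
// Resolves to JobListResponse directly; the route is now /v1alpha/post-training/jobs.
const jobs = await client.alpha.postTraining.job.list();
console.log(jobs);
```
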
diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts
index b6b8363..3b33eab 100644
--- a/src/resources/benchmarks.ts
+++ b/src/resources/benchmarks.ts
@@ -8,7 +8,7 @@ export class Benchmarks extends APIResource {
* Get a benchmark by its ID.
*/
retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise<Benchmark> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}`, options);
+ return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}`, options);
}
/**
@@ -16,7 +16,9 @@ export class Benchmarks extends APIResource {
*/
list(options?: Core.RequestOptions): Core.APIPromise<BenchmarkListResponse> {
return (
- this._client.get('/v1/eval/benchmarks', options) as Core.APIPromise<{ data: BenchmarkListResponse }>
+ this._client.get('/v1alpha/eval/benchmarks', options) as Core.APIPromise<{
+ data: BenchmarkListResponse;
+ }>
)._thenUnwrap((obj) => obj.data);
}
@@ -24,7 +26,7 @@ export class Benchmarks extends APIResource {
* Register a benchmark.
*/
register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.post('/v1/eval/benchmarks', {
+ return this._client.post('/v1alpha/eval/benchmarks', {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index b43e6d3..cc0e0eb 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -46,6 +46,11 @@ export interface ChatCompletionChunk {
* The object type, which will be "chat.completion.chunk"
*/
object: 'chat.completion.chunk';
+
+ /**
+ * Token usage information (typically included in final chunk with stream_options)
+ */
+ usage?: ChatCompletionChunk.Usage;
}
export namespace ChatCompletionChunk {
@@ -84,6 +89,11 @@ export namespace ChatCompletionChunk {
*/
content?: string;
+ /**
+ * (Optional) The reasoning content from the model (non-standard, for o1/o3 models)
+ */
+ reasoning_content?: string;
+
/**
* (Optional) The refusal of the delta
*/
@@ -217,6 +227,58 @@ export namespace ChatCompletionChunk {
}
}
}
+
+ /**
+ * Token usage information (typically included in final chunk with stream_options)
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
Chat.Completions = Completions;
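
The new `usage` field on `ChatCompletionChunk` typically arrives only on the final chunk. A streaming sketch, assuming the create params accept OpenAI-style `stream_options: { include_usage: true }`:

```ts
const stream = await client.chat.completions.create({
  model: 'my-model', // placeholder
  messages: [{ role: 'user', content: 'Hello!' }],
  stream: true,
  stream_options: { include_usage: true }, // assumed OpenAI-compatible option
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  if (chunk.usage) {
    // Present on the final chunk when usage reporting was requested.
    console.log(`\ntotal tokens: ${chunk.usage.total_tokens}`);
  }
}
```
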
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index c7ed5e8..7c8f133 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -11,8 +11,8 @@ import { Stream } from '../../streaming';
export class Completions extends APIResource {
/**
- * Generate an OpenAI-compatible chat completion for the given messages using the
- * specified model.
+ * Create chat completions. Generate an OpenAI-compatible chat completion for the
+ * given messages using the specified model.
*/
create(
body: CompletionCreateParamsNonStreaming,
@@ -30,22 +30,20 @@ export class Completions extends APIResource {
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatCompletionChunk>> {
- return this._client.post('/v1/openai/v1/chat/completions', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatCompletionChunk>>;
+ return this._client.post('/v1/chat/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<CompletionCreateResponse>
+ | APIPromise<Stream<ChatCompletionChunk>>;
}
/**
- * Describe a chat completion by its ID.
+ * Get chat completion. Describe a chat completion by its ID.
*/
retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise<CompletionRetrieveResponse> {
- return this._client.get(`/v1/openai/v1/chat/completions/${completionId}`, options);
+ return this._client.get(`/v1/chat/completions/${completionId}`, options);
}
/**
- * List all chat completions.
+ * List chat completions.
*/
list(
query?: CompletionListParams,
@@ -61,11 +59,10 @@ export class Completions extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList(
- '/v1/openai/v1/chat/completions',
- CompletionListResponsesOpenAICursorPage,
- { query, ...options },
- );
+ return this._client.getAPIList('/v1/chat/completions', CompletionListResponsesOpenAICursorPage, {
+ query,
+ ...options,
+ });
}
}
@@ -107,6 +104,11 @@ export namespace CompletionCreateResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: OpenAIChatCompletion.Usage;
}
export namespace OpenAIChatCompletion {
@@ -504,6 +506,58 @@ export namespace CompletionCreateResponse {
}
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
}
@@ -540,6 +594,11 @@ export interface CompletionRetrieveResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: CompletionRetrieveResponse.Usage;
}
export namespace CompletionRetrieveResponse {
@@ -1227,6 +1286,58 @@ export namespace CompletionRetrieveResponse {
type: 'text';
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
export interface CompletionListResponse {
@@ -1262,6 +1373,11 @@ export interface CompletionListResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: CompletionListResponse.Usage;
}
export namespace CompletionListResponse {
@@ -1949,6 +2065,58 @@ export namespace CompletionListResponse {
type: 'text';
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
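
Dropping the `/openai/v1` prefix is transparent to SDK callers; only code that built raw URLs against `/v1/openai/v1/chat/completions` needs updating. Non-streaming responses now optionally carry the same `usage` block:

```ts
// Posts to /v1/chat/completions after this change.
const completion = await client.chat.completions.create({
  model: 'my-model', // placeholder
  messages: [{ role: 'user', content: 'Say hi' }],
});

// usage (prompt/completion/total tokens) is optional on the response.
console.log(JSON.stringify(completion, null, 2));
```
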
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 0ade7ab..fe49a25 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -8,8 +8,8 @@ import { Stream } from '../streaming';
export class Completions extends APIResource {
/**
- * Generate an OpenAI-compatible completion for the given prompt using the
- * specified model.
+ * Create completion. Generate an OpenAI-compatible completion for the given prompt
+ * using the specified model.
*/
create(
body: CompletionCreateParamsNonStreaming,
@@ -27,11 +27,9 @@ export class Completions extends APIResource {
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise<CompletionCreateResponse> | APIPromise<Stream<CompletionCreateResponse>> {
- return this._client.post('/v1/openai/v1/completions', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<CompletionCreateResponse> | APIPromise<Stream<CompletionCreateResponse>>;
+ return this._client.post('/v1/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<CompletionCreateResponse>
+ | APIPromise<Stream<CompletionCreateResponse>>;
}
}
@@ -174,8 +172,6 @@ export interface CompletionCreateParamsBase {
*/
frequency_penalty?: number;
- guided_choice?: Array<string>;
-
/**
* (Optional) The logit bias to use.
*/
@@ -201,8 +197,6 @@ export interface CompletionCreateParamsBase {
*/
presence_penalty?: number;
- prompt_logprobs?: number;
-
/**
* (Optional) The seed to use.
*/
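
`guided_choice` and `prompt_logprobs` are removed from `CompletionCreateParamsBase`; both were non-standard extensions to the OpenAI completion schema. A plain completion call after the change:

```ts
// Posts to /v1/completions; max_tokens is assumed from the OpenAI-compatible schema.
const completion = await client.completions.create({
  model: 'my-model', // placeholder
  prompt: 'Once upon a time',
  max_tokens: 32,
});
console.log(completion);
```
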
diff --git a/src/resources/conversations.ts b/src/resources/conversations.ts
new file mode 100644
index 0000000..6b50950
--- /dev/null
+++ b/src/resources/conversations.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './conversations/index';
diff --git a/src/resources/conversations/conversations.ts b/src/resources/conversations/conversations.ts
new file mode 100644
index 0000000..faa7d4c
--- /dev/null
+++ b/src/resources/conversations/conversations.ts
@@ -0,0 +1,479 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+import * as ItemsAPI from './items';
+import {
+ ItemCreateParams,
+ ItemCreateResponse,
+ ItemGetResponse,
+ ItemListParams,
+ ItemListResponse,
+ Items,
+} from './items';
+
+export class Conversations extends APIResource {
+ items: ItemsAPI.Items = new ItemsAPI.Items(this._client);
+
+ /**
+ * Create a conversation.
+ */
+ create(body: ConversationCreateParams, options?: Core.RequestOptions): Core.APIPromise<ConversationObject> {
+ return this._client.post('/v1/conversations', { body, ...options });
+ }
+
+ /**
+ * Get a conversation with the given ID.
+ */
+ retrieve(conversationId: string, options?: Core.RequestOptions): Core.APIPromise<ConversationObject> {
+ return this._client.get(`/v1/conversations/${conversationId}`, options);
+ }
+
+ /**
+ * Update a conversation's metadata with the given ID.
+ */
+ update(
+ conversationId: string,
+ body: ConversationUpdateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ConversationObject> {
+ return this._client.post(`/v1/conversations/${conversationId}`, { body, ...options });
+ }
+
+ /**
+ * Delete a conversation with the given ID.
+ */
+ delete(conversationId: string, options?: Core.RequestOptions): Core.APIPromise<ConversationDeleteResponse> {
+ return this._client.delete(`/v1/conversations/${conversationId}`, options);
+ }
+}
+
+/**
+ * OpenAI-compatible conversation object.
+ */
+export interface ConversationObject {
+ id: string;
+
+ created_at: number;
+
+ object: 'conversation';
+
+ items?: Array;
+
+ metadata?: { [key: string]: string };
+}
+
+/**
+ * Response for deleted conversation.
+ */
+export interface ConversationDeleteResponse {
+ id: string;
+
+ deleted: boolean;
+
+ object: string;
+}
+
+export interface ConversationCreateParams {
+ /**
+ * Initial items to include in the conversation context.
+ */
+ items?: Array<
+ | ConversationCreateParams.OpenAIResponseMessage
+ | ConversationCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ /**
+ * Set of key-value pairs that can be attached to an object.
+ */
+ metadata?: { [key: string]: string };
+}
+
+export namespace ConversationCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<OpenAIResponseMessage.UnionMember2>;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ConversationUpdateParams {
+ /**
+ * Set of key-value pairs that can be attached to an object.
+ */
+ metadata: { [key: string]: string };
+}
+
+Conversations.Items = Items;
+
+export declare namespace Conversations {
+ export {
+ type ConversationObject as ConversationObject,
+ type ConversationDeleteResponse as ConversationDeleteResponse,
+ type ConversationCreateParams as ConversationCreateParams,
+ type ConversationUpdateParams as ConversationUpdateParams,
+ };
+
+ export {
+ Items as Items,
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
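
A minimal lifecycle sketch for the new Conversations resource, grounded in the signatures above (the message item follows the `OpenAIResponseMessage` shape):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

const conversation = await client.conversations.create({
  items: [{ type: 'message', role: 'user', content: 'Hello!' }],
  metadata: { topic: 'greeting' },
});

// metadata is required on update per ConversationUpdateParams.
await client.conversations.update(conversation.id, {
  metadata: { topic: 'greeting', reviewed: 'true' },
});

const fetched = await client.conversations.retrieve(conversation.id);
console.log(fetched.metadata);

const deleted = await client.conversations.delete(conversation.id);
console.log(deleted.deleted);
```
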
diff --git a/src/resources/conversations/index.ts b/src/resources/conversations/index.ts
new file mode 100644
index 0000000..f60086f
--- /dev/null
+++ b/src/resources/conversations/index.ts
@@ -0,0 +1,17 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations';
+export {
+ Items,
+ type ItemCreateResponse,
+ type ItemListResponse,
+ type ItemGetResponse,
+ type ItemCreateParams,
+ type ItemListParams,
+} from './items';
diff --git a/src/resources/conversations/items.ts b/src/resources/conversations/items.ts
new file mode 100644
index 0000000..5e15969
--- /dev/null
+++ b/src/resources/conversations/items.ts
@@ -0,0 +1,1583 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+
+export class Items extends APIResource {
+ /**
+ * Create items in the conversation.
+ */
+ create(
+ conversationId: string,
+ body: ItemCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ItemCreateResponse> {
+ return this._client.post(`/v1/conversations/${conversationId}/items`, { body, ...options });
+ }
+
+ /**
+ * List items in the conversation.
+ */
+ list(
+ conversationId: string,
+ query: ItemListParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ItemListResponse> {
+ return this._client.get(`/v1/conversations/${conversationId}/items`, { query, ...options });
+ }
+
+ /**
+ * Retrieve a conversation item.
+ */
+ get(
+ conversationId: string,
+ itemId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ItemGetResponse> {
+ return this._client.get(`/v1/conversations/${conversationId}/items/${itemId}`, options);
+ }
+}
+
+/**
+ * List of conversation items with pagination.
+ */
+export interface ItemCreateResponse {
+ data: Array<
+ | ItemCreateResponse.OpenAIResponseMessage
+ | ItemCreateResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ has_more: boolean;
+
+ object: string;
+
+ first_id?: string;
+
+ last_id?: string;
+}
+
+export namespace ItemCreateResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<OpenAIResponseMessage.UnionMember2>;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+/**
+ * List of conversation items with pagination.
+ */
+export interface ItemListResponse {
+ data: Array<
+ | ItemListResponse.OpenAIResponseMessage
+ | ItemListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemListResponse.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ has_more: boolean;
+
+ object: string;
+
+ first_id?: string;
+
+ last_id?: string;
+}
+
+export namespace ItemListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<OpenAIResponseMessage.UnionMember2>;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+/**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+export type ItemGetResponse =
+ | ItemGetResponse.OpenAIResponseMessage
+ | ItemGetResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemGetResponse.OpenAIResponseOutputMessageMcpListTools;
+
+export namespace ItemGetResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<OpenAIResponseMessage.UnionMember2>;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ItemCreateParams {
+ /**
+ * Items to include in the conversation context.
+ */
+ items: Array<
+ | ItemCreateParams.OpenAIResponseMessage
+ | ItemCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ItemCreateParams.OpenAIResponseOutputMessageMcpListTools
+ >;
+}
+
+export namespace ItemCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
| Array<OpenAIResponseMessage.UnionMember2>;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface UnionMember2 {
+ annotations: Array<
+ | UnionMember2.OpenAIResponseAnnotationFileCitation
+ | UnionMember2.OpenAIResponseAnnotationCitation
+ | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
+ | UnionMember2.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace UnionMember2 {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ItemListParams {
+ /**
+ * An item ID to list items after, used in pagination.
+ */
+ after: string | unknown;
+
+ /**
+ * Specify additional output data to include in the response.
+ */
+ include:
+ | Array<
+ | 'code_interpreter_call.outputs'
+ | 'computer_call_output.output.image_url'
+ | 'file_search_call.results'
+ | 'message.input_image.image_url'
+ | 'message.output_text.logprobs'
+ | 'reasoning.encrypted_content'
+ >
+ | unknown;
+
+ /**
+ * A limit on the number of objects to be returned (1-100, default 20).
+ */
+ limit: number | unknown;
+
+ /**
+ * The order to return items in (asc or desc, default desc).
+ */
+ order: 'asc' | 'desc' | unknown;
+}
+
+export declare namespace Items {
+ export {
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
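For orientation, here is a minimal sketch of driving these item types from the client. The `client.conversations.items` accessor and its positional `conversationId` argument are assumptions based on the resource layout (the method bodies sit above this hunk); the parameter shapes follow `ItemCreateParams` and `ItemListParams` exactly.

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // Append a user message to an existing conversation (IDs are placeholders).
  const created = await client.conversations.items.create('conv_123', {
    items: [
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_text', text: 'Summarize the attached report.' }],
      },
    ],
  });
  console.log(created);

  // Page through items, newest first; the params mirror ItemListParams above.
  const page = await client.conversations.items.list('conv_123', {
    after: 'item_abc',
    include: ['message.output_text.logprobs'],
    limit: 20,
    order: 'desc',
  });
  console.log(page);
}

main();
```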
diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts
index 5ed6661..7a33c4f 100644
--- a/src/resources/datasets.ts
+++ b/src/resources/datasets.ts
@@ -9,7 +9,7 @@ export class Datasets extends APIResource {
* Get a dataset by its ID.
*/
retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<DatasetRetrieveResponse> {
- return this._client.get(`/v1/datasets/${datasetId}`, options);
+ return this._client.get(`/v1beta/datasets/${datasetId}`, options);
}
/**
@@ -17,7 +17,7 @@ export class Datasets extends APIResource {
*/
list(options?: Core.RequestOptions): Core.APIPromise<DatasetListResponse> {
return (
- this._client.get('/v1/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }>
+ this._client.get('/v1beta/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }>
)._thenUnwrap((obj) => obj.data);
}
@@ -29,7 +29,7 @@ export class Datasets extends APIResource {
body: DatasetAppendrowsParams,
options?: Core.RequestOptions,
): Core.APIPromise<void> {
- return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, {
+ return this._client.post(`/v1beta/datasetio/append-rows/${datasetId}`, {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
@@ -61,7 +61,7 @@ export class Datasets extends APIResource {
if (isRequestOptions(query)) {
return this.iterrows(datasetId, {}, query);
}
- return this._client.get(`/v1/datasetio/iterrows/${datasetId}`, { query, ...options });
+ return this._client.get(`/v1beta/datasetio/iterrows/${datasetId}`, { query, ...options });
}
/**
@@ -71,14 +71,14 @@ export class Datasets extends APIResource {
body: DatasetRegisterParams,
options?: Core.RequestOptions,
): Core.APIPromise<DatasetRegisterResponse> {
- return this._client.post('/v1/datasets', { body, ...options });
+ return this._client.post('/v1beta/datasets', { body, ...options });
}
/**
* Unregister a dataset by its ID.
*/
unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/datasets/${datasetId}`, {
+ return this._client.delete(`/v1beta/datasets/${datasetId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
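A quick sketch of the calls this hunk reroutes; no call-site changes are needed, only the underlying paths move from `/v1` to `/v1beta`. The `rows` and `limit` field names are assumptions (their param types sit outside this hunk).

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // GET /v1beta/datasets and GET /v1beta/datasets/{dataset_id}
  const datasets = await client.datasets.list();
  const dataset = await client.datasets.retrieve('my-dataset');
  console.log(datasets.length, dataset);

  // POST /v1beta/datasetio/append-rows/{dataset_id}; `rows` is assumed from
  // DatasetAppendrowsParams, which is defined outside this hunk.
  await client.datasets.appendrows('my-dataset', {
    rows: [{ question: 'What is 2 + 2?', answer: '4' }],
  });

  // GET /v1beta/datasetio/iterrows/{dataset_id}, with an assumed `limit` param.
  const rows = await client.datasets.iterrows('my-dataset', { limit: 10 });
  console.log(rows);
}

main();
```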
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index 89758af..f07ff14 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -5,14 +5,14 @@ import * as Core from '../core';
export class Embeddings extends APIResource {
/**
- * Generate OpenAI-compatible embeddings for the given input using the specified
- * model.
+ * Create embeddings. Generate OpenAI-compatible embeddings for the given input
+ * using the specified model.
*/
create(
body: EmbeddingCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<CreateEmbeddingsResponse> {
- return this._client.post('/v1/openai/v1/embeddings', { body, ...options });
+ return this._client.post('/v1/embeddings', { body, ...options });
}
}
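Call sites are unchanged by the route move; a minimal sketch, with a placeholder model name and the OpenAI-compatible `model`/`input` body fields assumed from `EmbeddingCreateParams`:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // POST /v1/embeddings (previously /v1/openai/v1/embeddings)
  const res = await client.embeddings.create({
    model: 'all-MiniLM-L6-v2',
    input: ['llama stack makes embeddings easy'],
  });
  // CreateEmbeddingsResponse follows the OpenAI shape: one vector per input.
  console.log(res.data.length);
}

main();
```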
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 4dc5223..e59026e 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -7,25 +7,27 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../pagination';
export class Files extends APIResource {
/**
- * Upload a file that can be used across various endpoints. The file upload should
- * be a multipart form request with:
+ * Upload file. Upload a file that can be used across various endpoints.
+ *
+ * The file upload should be a multipart form request with:
*
* - file: The File object (not file name) to be uploaded.
* - purpose: The intended purpose of the uploaded file.
+ * - expires_after: Optional form values describing expiration for the file.
*/
create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise<File> {
- return this._client.post('/v1/openai/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
+ return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
}
/**
- * Returns information about a specific file.
+ * Retrieve file. Returns information about a specific file.
*/
retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise<File> {
- return this._client.get(`/v1/openai/v1/files/${fileId}`, options);
+ return this._client.get(`/v1/files/${fileId}`, options);
}
/**
- * Returns a list of files that belong to the user's organization.
+ * List files. Returns a list of files that belong to the user's organization.
*/
list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
list(options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
@@ -36,21 +38,21 @@ export class Files extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/files', FilesOpenAICursorPage, { query, ...options });
+ return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options });
}
/**
- * Delete a file.
+ * Delete file.
*/
delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise<DeleteFileResponse> {
- return this._client.delete(`/v1/openai/v1/files/${fileId}`, options);
+ return this._client.delete(`/v1/files/${fileId}`, options);
}
/**
- * Returns the contents of the specified file.
+ * Retrieve file content. Returns the contents of the specified file.
*/
content(fileId: string, options?: Core.RequestOptions): Core.APIPromise<unknown> {
- return this._client.get(`/v1/openai/v1/files/${fileId}/content`, options);
+ return this._client.get(`/v1/files/${fileId}/content`, options);
}
}
@@ -155,6 +157,28 @@ export interface FileCreateParams {
* Valid purpose values for OpenAI Files API.
*/
purpose: 'assistants' | 'batch';
+
+ /**
+ * Control expiration of uploaded files. Params:
+ *
+ * - anchor, must be "created_at"
+ * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ */
+ expires_after?: FileCreateParams.ExpiresAfter;
+}
+
+export namespace FileCreateParams {
+ /**
+ * Control expiration of uploaded files. Params:
+ *
+ * - anchor, must be "created_at"
+ * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ */
+ export interface ExpiresAfter {
+ anchor: 'created_at';
+
+ seconds: number;
+ }
}
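A sketch of uploading a file that expires after one day, using the new `expires_after` form fields. `toFile` is the upload helper Stainless-generated SDKs conventionally export from the package root; treat that import, and the `id` field read at the end, as assumptions.

```ts
import LlamaStackClient, { toFile } from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  const file = await client.files.create({
    file: await toFile(Buffer.from('{"text": "hello"}'), 'notes.jsonl'),
    purpose: 'assistants',
    // anchor must be 'created_at'; seconds must fall in [3600, 2592000].
    expires_after: { anchor: 'created_at', seconds: 86_400 },
  });
  console.log(file.id);
}

main();
```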
export interface FileListParams extends OpenAICursorPageParams {
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 58ad928..5003b67 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,19 +1,7 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export * from './shared';
-export {
- Agents,
- type InferenceStep,
- type MemoryRetrievalStep,
- type ShieldCallStep,
- type ToolExecutionStep,
- type ToolResponse,
- type AgentCreateResponse,
- type AgentRetrieveResponse,
- type AgentListResponse,
- type AgentCreateParams,
- type AgentListParams,
-} from './agents/agents';
+export { Alpha } from './alpha/alpha';
export {
Benchmarks,
type Benchmark,
@@ -29,6 +17,13 @@ export {
type CompletionCreateParamsNonStreaming,
type CompletionCreateParamsStreaming,
} from './completions';
+export {
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations/conversations';
export {
Datasets,
type ListDatasetsResponse,
@@ -41,17 +36,6 @@ export {
type DatasetRegisterParams,
} from './datasets';
export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings';
-export {
- Eval,
- type BenchmarkConfig,
- type EvalCandidate,
- type EvaluateResponse,
- type Job,
- type EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams,
- type EvalRunEvalAlphaParams,
-} from './eval/eval';
export {
FilesOpenAICursorPage,
Files,
@@ -62,25 +46,6 @@ export {
type FileCreateParams,
type FileListParams,
} from './files';
-export {
- Inference,
- type ChatCompletionResponseStreamChunk,
- type CompletionResponse,
- type EmbeddingsResponse,
- type TokenLogProbs,
- type InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse,
- type InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams,
- type InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams,
- type InferenceRerankParams,
-} from './inference';
export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect';
export {
Models,
@@ -90,14 +55,6 @@ export {
type ModelRegisterParams,
} from './models/models';
export { Moderations, type CreateResponse, type ModerationCreateParams } from './moderations';
-export {
- PostTraining,
- type AlgorithmConfig,
- type ListPostTrainingJobsResponse,
- type PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams,
-} from './post-training/post-training';
export { Providers, type ListProvidersResponse, type ProviderListResponse } from './providers';
export {
ResponseListResponsesOpenAICursorPage,
@@ -153,7 +110,6 @@ export {
type TelemetryQuerySpansResponse,
type TelemetryQueryTracesResponse,
type TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams,
type TelemetryQueryMetricsParams,
type TelemetryQuerySpansParams,
type TelemetryQueryTracesParams,
@@ -174,21 +130,7 @@ export {
type ToolgroupListResponse,
type ToolgroupRegisterParams,
} from './toolgroups';
-export {
- Tools,
- type ListToolsResponse,
- type Tool,
- type ToolListResponse,
- type ToolListParams,
-} from './tools';
-export {
- VectorDBs,
- type ListVectorDBsResponse,
- type VectorDBRetrieveResponse,
- type VectorDBListResponse,
- type VectorDBRegisterResponse,
- type VectorDBRegisterParams,
-} from './vector-dbs';
+export { Tools, type ToolListResponse, type ToolListParams } from './tools';
export {
VectorIo,
type QueryChunksResponse,
diff --git a/src/resources/inference.ts b/src/resources/inference.ts
deleted file mode 100644
index a6f3e1e..0000000
--- a/src/resources/inference.ts
+++ /dev/null
@@ -1,762 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import { APIPromise } from '../core';
-import * as Core from '../core';
-import * as InferenceAPI from './inference';
-import * as Shared from './shared';
-import { Stream } from '../streaming';
-
-export class Inference extends APIResource {
- /**
- * Generate chat completions for a batch of messages using the specified model.
- */
- batchChatCompletion(
- body: InferenceBatchChatCompletionParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<InferenceBatchChatCompletionResponse> {
- return this._client.post('/v1/inference/batch-chat-completion', { body, ...options });
- }
-
- /**
- * Generate completions for a batch of content using the specified model.
- */
- batchCompletion(
- body: InferenceBatchCompletionParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<Shared.BatchCompletion> {
- return this._client.post('/v1/inference/batch-completion', { body, ...options });
- }
-
- /**
- * Generate a chat completion for the given messages using the specified model.
- *
- * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions.
- */
- chatCompletion(
- body: InferenceChatCompletionParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Shared.ChatCompletionResponse>;
- chatCompletion(
- body: InferenceChatCompletionParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<ChatCompletionResponseStreamChunk>>;
- chatCompletion(
- body: InferenceChatCompletionParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<ChatCompletionResponseStreamChunk> | Shared.ChatCompletionResponse>;
- chatCompletion(
- body: InferenceChatCompletionParams,
- options?: Core.RequestOptions,
- ): APIPromise<Shared.ChatCompletionResponse> | APIPromise<Stream<ChatCompletionResponseStreamChunk>> {
- return this._client.post('/v1/inference/chat-completion', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<Shared.ChatCompletionResponse> | APIPromise<Stream<ChatCompletionResponseStreamChunk>>;
- }
-
- /**
- * Generate a completion for the given content using the specified model.
- *
- * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.
- */
- completion(
- body: InferenceCompletionParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<CompletionResponse>;
- completion(
- body: InferenceCompletionParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<CompletionResponse>>;
- completion(
- body: InferenceCompletionParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<CompletionResponse> | CompletionResponse>;
- completion(
- body: InferenceCompletionParams,
- options?: Core.RequestOptions,
- ): APIPromise<CompletionResponse> | APIPromise<Stream<CompletionResponse>> {
- return this._client.post('/v1/inference/completion', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<CompletionResponse> | APIPromise<Stream<CompletionResponse>>;
- }
-
- /**
- * Generate embeddings for content pieces using the specified model.
- *
- * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.
- */
- embeddings(
- body: InferenceEmbeddingsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<EmbeddingsResponse> {
- return this._client.post('/v1/inference/embeddings', { body, ...options });
- }
-
- /**
- * Rerank a list of documents based on their relevance to a query.
- */
- rerank(
- body: InferenceRerankParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<InferenceRerankResponse> {
- return (
- this._client.post('/v1/inference/rerank', { body, ...options }) as Core.APIPromise<{
- data: InferenceRerankResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-}
-
-/**
- * A chunk of a streamed chat completion response.
- */
-export interface ChatCompletionResponseStreamChunk {
- /**
- * The event containing the new content
- */
- event: ChatCompletionResponseStreamChunk.Event;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array<Shared.Metric>;
-}
-
-export namespace ChatCompletionResponseStreamChunk {
- /**
- * The event containing the new content
- */
- export interface Event {
- /**
- * Content generated since last event. This can be one or more tokens, or a tool
- * call.
- */
- delta: Shared.ContentDelta;
-
- /**
- * Type of the event
- */
- event_type: 'start' | 'complete' | 'progress';
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array<InferenceAPI.TokenLogProbs>;
-
- /**
- * Optional reason why generation stopped, if complete
- */
- stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
- }
-}
-
-/**
- * Response from a completion request.
- */
-export interface CompletionResponse {
- /**
- * The generated completion text
- */
- content: string;
-
- /**
- * Reason why generation stopped
- */
- stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array<TokenLogProbs>;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array<Shared.Metric>;
-}
-
-/**
- * Response containing generated embeddings.
- */
-export interface EmbeddingsResponse {
- /**
- * List of embedding vectors, one per input content. Each embedding is a list of
- * floats. The dimensionality of the embedding is model-specific; you can check
- * model metadata using /models/{model_id}
- */
- embeddings: Array<Array<number>>;
-}
-
-/**
- * Log probabilities for generated tokens.
- */
-export interface TokenLogProbs {
- /**
- * Dictionary mapping tokens to their log probabilities
- */
- logprobs_by_token: { [key: string]: number };
-}
-
-/**
- * Response from a batch chat completion request.
- */
-export interface InferenceBatchChatCompletionResponse {
- /**
- * List of chat completion responses, one for each conversation in the batch
- */
- batch: Array<Shared.ChatCompletionResponse>;
-}
-
-/**
- * List of rerank result objects, sorted by relevance score (descending)
- */
-export type InferenceRerankResponse = Array<InferenceRerankResponse.InferenceRerankResponseItem>;
-
-export namespace InferenceRerankResponse {
- /**
- * A single rerank result from a reranking response.
- */
- export interface InferenceRerankResponseItem {
- /**
- * The original index of the document in the input list
- */
- index: number;
-
- /**
- * The relevance score from the model output. Values are inverted when applicable
- * so that higher scores indicate greater relevance.
- */
- relevance_score: number;
- }
-}
-
-export interface InferenceBatchChatCompletionParams {
- /**
- * The messages to generate completions for.
- */
- messages_batch: Array<Array<Shared.Message>>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceBatchChatCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) Configuration for tool use.
- */
- tool_config?: InferenceBatchChatCompletionParams.ToolConfig;
-
- /**
- * (Optional) List of tool definitions available to the model.
- */
- tools?: Array<InferenceBatchChatCompletionParams.Tool>;
-}
-
-export namespace InferenceBatchChatCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- /**
- * (Optional) Configuration for tool use.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * <function=function_name> tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface Tool {
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- description?: string;
-
- parameters?: { [key: string]: Shared.ToolParamDefinition };
- }
-}
-
-export interface InferenceBatchCompletionParams {
- /**
- * The content to generate completions for.
- */
- content_batch: Array<Shared.InterleavedContent>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceBatchCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-}
-
-export namespace InferenceBatchCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-}
-
-export type InferenceChatCompletionParams =
- | InferenceChatCompletionParamsNonStreaming
- | InferenceChatCompletionParamsStreaming;
-
-export interface InferenceChatCompletionParamsBase {
- /**
- * List of messages in the conversation.
- */
- messages: Array<Shared.Message>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceChatCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding. There are two
- * options: - `ResponseFormat.json_schema`: The grammar is a JSON schema. Most
- * providers support this format. - `ResponseFormat.grammar`: The grammar is a BNF
- * grammar. This format is more flexible, but not all providers support it.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-
- /**
- * (Optional) Whether tool use is required or automatic. Defaults to
- * ToolChoice.auto. .. deprecated:: Use tool_config instead.
- */
- tool_choice?: 'auto' | 'required' | 'none';
-
- /**
- * (Optional) Configuration for tool use.
- */
- tool_config?: InferenceChatCompletionParams.ToolConfig;
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * <function=function_name> tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls. .. deprecated:: Use
- * tool_config instead.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
-
- /**
- * (Optional) List of tool definitions available to the model.
- */
- tools?: Array<InferenceChatCompletionParams.Tool>;
-}
-
-export namespace InferenceChatCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- /**
- * (Optional) Configuration for tool use.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * <function=function_name> tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface Tool {
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- description?: string;
-
- parameters?: { [key: string]: Shared.ToolParamDefinition };
- }
-
- export type InferenceChatCompletionParamsNonStreaming =
- InferenceAPI.InferenceChatCompletionParamsNonStreaming;
- export type InferenceChatCompletionParamsStreaming = InferenceAPI.InferenceChatCompletionParamsStreaming;
-}
-
-export interface InferenceChatCompletionParamsNonStreaming extends InferenceChatCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface InferenceChatCompletionParamsStreaming extends InferenceChatCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export type InferenceCompletionParams =
- | InferenceCompletionParamsNonStreaming
- | InferenceCompletionParamsStreaming;
-
-export interface InferenceCompletionParamsBase {
- /**
- * The content to generate a completion for.
- */
- content: Shared.InterleavedContent;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-}
-
-export namespace InferenceCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- export type InferenceCompletionParamsNonStreaming = InferenceAPI.InferenceCompletionParamsNonStreaming;
- export type InferenceCompletionParamsStreaming = InferenceAPI.InferenceCompletionParamsStreaming;
-}
-
-export interface InferenceCompletionParamsNonStreaming extends InferenceCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface InferenceCompletionParamsStreaming extends InferenceCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export interface InferenceEmbeddingsParams {
- /**
- * List of contents to generate embeddings for. Each content can be a string or an
- * InterleavedContentItem (and hence can be multimodal). The behavior depends on
- * the model and provider. Some models may only support text.
- */
- contents: Array<string> | Array<Shared.InterleavedContentItem>;
-
- /**
- * The identifier of the model to use. The model must be an embedding model
- * registered with Llama Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) Output dimensionality for the embeddings. Only supported by
- * Matryoshka models.
- */
- output_dimension?: number;
-
- /**
- * (Optional) How is the embedding being used? This is only supported by asymmetric
- * embedding models.
- */
- task_type?: 'query' | 'document';
-
- /**
- * (Optional) Config for how to truncate text for embedding when text is longer
- * than the model's max sequence length.
- */
- text_truncation?: 'none' | 'start' | 'end';
-}
-
-export interface InferenceRerankParams {
- /**
- * List of items to rerank. Each item can be a string, text content part, or image
- * content part. Each input must not exceed the model's max input token length.
- */
- items: Array<
- | string
- | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
- | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam
- >;
-
- /**
- * The identifier of the reranking model to use.
- */
- model: string;
-
- /**
- * The search query to rank items against. Can be a string, text content part, or
- * image content part. The input must not exceed the model's max input token
- * length.
- */
- query:
- | string
- | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
- | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam;
-
- /**
- * (Optional) Maximum number of results to return. Default: returns all.
- */
- max_num_results?: number;
-}
-
-export namespace InferenceRerankParams {
- /**
- * Text content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartTextParam {
- /**
- * The text content of the message
- */
- text: string;
-
- /**
- * Must be "text" to identify this as text content
- */
- type: 'text';
- }
-
- /**
- * Image content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- /**
- * Must be "image_url" to identify this as image content
- */
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- export interface ImageURL {
- /**
- * URL of the image to include in the message
- */
- url: string;
-
- /**
- * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
- */
- detail?: string;
- }
- }
-
- /**
- * Text content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartTextParam {
- /**
- * The text content of the message
- */
- text: string;
-
- /**
- * Must be "text" to identify this as text content
- */
- type: 'text';
- }
-
- /**
- * Image content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- /**
- * Must be "image_url" to identify this as image content
- */
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- export interface ImageURL {
- /**
- * URL of the image to include in the message
- */
- url: string;
-
- /**
- * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
- */
- detail?: string;
- }
- }
-}
-
-export declare namespace Inference {
- export {
- type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
- type CompletionResponse as CompletionResponse,
- type EmbeddingsResponse as EmbeddingsResponse,
- type TokenLogProbs as TokenLogProbs,
- type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse as InferenceRerankResponse,
- type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
- type InferenceChatCompletionParams as InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams as InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
- type InferenceRerankParams as InferenceRerankParams,
- };
-}
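Since this file is deleted outright, callers of `client.inference.*` must migrate. A hedged sketch of the replacements: chat completions assume an OpenAI-compatible `client.chat.completions` surface, and rerank assumes `client.alpha.inference.rerank` per this release's move of rerank under the alpha namespace; verify both accessors against the generated client.

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // Before: client.inference.chatCompletion({ model_id, messages, ... })
  // After: the OpenAI-compatible endpoint at /v1/chat/completions.
  const completion = await client.chat.completions.create({
    model: 'llama3.2:3b',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
  console.log(completion);

  // Rerank moved out of Inference; the alpha accessor below is an assumption.
  const ranked = await client.alpha.inference.rerank({
    model: 'my-reranker',
    query: 'best pizza in town',
    items: ['pizza place A', 'taco truck B'],
  });
  console.log(ranked);
}

main();
```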
diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts
index 4e5d87c..0c10896 100644
--- a/src/resources/inspect.ts
+++ b/src/resources/inspect.ts
@@ -5,14 +5,14 @@ import * as Core from '../core';
export class Inspect extends APIResource {
/**
- * Get the current health status of the service.
+ * Get health status. Get the current health status of the service.
*/
health(options?: Core.RequestOptions): Core.APIPromise<HealthInfo> {
return this._client.get('/v1/health', options);
}
/**
- * Get the version of the service.
+ * Get version. Get the version of the service.
*/
version(options?: Core.RequestOptions): Core.APIPromise<VersionInfo> {
return this._client.get('/v1/version', options);
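Both are plain GETs; a minimal sketch (the `status` and `version` field reads assume the `HealthInfo` and `VersionInfo` shapes, which live outside this hunk):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  const health = await client.inspect.health(); // GET /v1/health
  const version = await client.inspect.version(); // GET /v1/version
  console.log(health.status, version.version);
}

main();
```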
diff --git a/src/resources/models/index.ts b/src/resources/models/index.ts
index de6ecf3..e05a022 100644
--- a/src/resources/models/index.ts
+++ b/src/resources/models/index.ts
@@ -7,4 +7,4 @@ export {
type ModelListResponse,
type ModelRegisterParams,
} from './models';
-export { OpenAI, type OpenAIListResponse } from './openai';
+export { OpenAI } from './openai';
diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts
index d72281f..041245a 100644
--- a/src/resources/models/models.ts
+++ b/src/resources/models/models.ts
@@ -3,13 +3,13 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
import * as OpenAIAPI from './openai';
-import { OpenAI, OpenAIListResponse } from './openai';
+import { OpenAI } from './openai';
export class Models extends APIResource {
openai: OpenAIAPI.OpenAI = new OpenAIAPI.OpenAI(this._client);
/**
- * Get a model by its identifier.
+ * Get model. Get a model by its identifier.
*/
retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.get(`/v1/models/${modelId}`, options);
@@ -25,14 +25,14 @@ export class Models extends APIResource {
}
/**
- * Register a model.
+ * Register model. Register a model.
*/
register(body: ModelRegisterParams, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.post('/v1/models', { body, ...options });
}
/**
- * Unregister a model.
+ * Unregister model. Unregister a model.
*/
unregister(modelId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
return this._client.delete(`/v1/models/${modelId}`, {
@@ -120,5 +120,5 @@ export declare namespace Models {
type ModelRegisterParams as ModelRegisterParams,
};
- export { OpenAI as OpenAI, type OpenAIListResponse as OpenAIListResponse };
+ export { OpenAI as OpenAI };
}
diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts
index bcdef6f..c6b90d1 100644
--- a/src/resources/models/openai.ts
+++ b/src/resources/models/openai.ts
@@ -2,35 +2,15 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
+import * as ModelsAPI from './models';
export class OpenAI extends APIResource {
/**
- * List models using the OpenAI API.
+ * List all models.
*/
- list(options?: Core.RequestOptions): Core.APIPromise<OpenAIListResponse> {
+ list(options?: Core.RequestOptions): Core.APIPromise<ModelsAPI.ModelListResponse> {
return (
- this._client.get('/v1/openai/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }>
+ this._client.get('/v1/models', options) as Core.APIPromise<{ data: ModelsAPI.ModelListResponse }>
)._thenUnwrap((obj) => obj.data);
}
}
-
-export type OpenAIListResponse = Array<OpenAIListResponse.OpenAIListResponseItem>;
-
-export namespace OpenAIListResponse {
- /**
- * A model from OpenAI.
- */
- export interface OpenAIListResponseItem {
- id: string;
-
- created: number;
-
- object: 'model';
-
- owned_by: string;
- }
-}
-
-export declare namespace OpenAI {
- export { type OpenAIListResponse as OpenAIListResponse };
-}
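After this change the OpenAI-flavored route is just an alias: `client.models.openai.list()` resolves to the same `ModelListResponse` as `client.models.list()`, fetched from the unified `/v1/models` route. A sketch (the `identifier` field is assumed from the `Model` type, which is outside this hunk):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  const models = await client.models.openai.list(); // GET /v1/models, unwrapped to `data`
  for (const m of models) {
    console.log(m.identifier);
  }
}

main();
```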
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index a945ab3..be12766 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -5,10 +5,11 @@ import * as Core from '../core';
export class Moderations extends APIResource {
/**
- * Classifies if text and/or image inputs are potentially harmful.
+ * Create moderation. Classifies if text and/or image inputs are potentially
+ * harmful.
*/
create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise<CreateResponse> {
- return this._client.post('/v1/openai/v1/moderations', { body, ...options });
+ return this._client.post('/v1/moderations', { body, ...options });
}
}
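Only the route changes; a minimal sketch, with the OpenAI-compatible `input`/`model` body fields and the `results` read assumed from types outside this hunk:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // POST /v1/moderations (previously /v1/openai/v1/moderations)
  const moderation = await client.moderations.create({
    input: 'some text to classify',
    model: 'llama-guard',
  });
  console.log(moderation.results);
}

main();
```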
diff --git a/src/resources/providers.ts b/src/resources/providers.ts
index d27b9ab..2736f37 100644
--- a/src/resources/providers.ts
+++ b/src/resources/providers.ts
@@ -6,14 +6,14 @@ import * as InspectAPI from './inspect';
export class Providers extends APIResource {
/**
- * Get detailed information about a specific provider.
+ * Get provider. Get detailed information about a specific provider.
*/
retrieve(providerId: string, options?: Core.RequestOptions): Core.APIPromise<InspectAPI.ProviderInfo> {
return this._client.get(`/v1/providers/${providerId}`, options);
}
/**
- * List all available providers.
+ * List providers. List all available providers.
*/
list(options?: Core.RequestOptions): Core.APIPromise<ProviderListResponse> {
return (
diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts
index 74c556c..c8c672b 100644
--- a/src/resources/responses/input-items.ts
+++ b/src/resources/responses/input-items.ts
@@ -6,7 +6,7 @@ import * as Core from '../../core';
export class InputItems extends APIResource {
/**
- * List input items for a given OpenAI response.
+ * List input items.
*/
list(
responseId: string,
@@ -22,7 +22,7 @@ export class InputItems extends APIResource {
if (isRequestOptions(query)) {
return this.list(responseId, {}, query);
}
- return this._client.get(`/v1/openai/v1/responses/${responseId}/input_items`, { query, ...options });
+ return this._client.get(`/v1/responses/${responseId}/input_items`, { query, ...options });
}
}
@@ -38,6 +38,8 @@ export interface InputItemListResponse {
| InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
| InputItemListResponse.OpenAIResponseOutputMessageFunctionToolCall
| InputItemListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | InputItemListResponse.OpenAIResponseMcpApprovalRequest
+ | InputItemListResponse.OpenAIResponseMcpApprovalResponse
| InputItemListResponse.OpenAIResponseMessage
>;
@@ -181,6 +183,36 @@ export namespace InputItemListResponse {
status?: string;
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index e329519..f785bf5 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -14,7 +14,7 @@ export class Responses extends APIResource {
inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
/**
- * Create a new OpenAI response.
+ * Create a model response.
*/
create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<ResponseObject>;
create(
@@ -29,22 +29,20 @@ export class Responses extends APIResource {
body: ResponseCreateParams,
options?: Core.RequestOptions,
): APIPromise<ResponseObject> | APIPromise<Stream<ResponseObjectStream>> {
- return this._client.post('/v1/openai/v1/responses', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<ResponseObject> | APIPromise<Stream<ResponseObjectStream>>;
+ return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<ResponseObject>
+ | APIPromise<Stream<ResponseObjectStream>>;
}
/**
- * Retrieve an OpenAI response by its ID.
+ * Get a model response.
*/
retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise<ResponseObject> {
- return this._client.get(`/v1/openai/v1/responses/${responseId}`, options);
+ return this._client.get(`/v1/responses/${responseId}`, options);
}
/**
- * List all OpenAI responses.
+ * List all responses.
*/
list(
query?: ResponseListParams,
@@ -60,17 +58,17 @@ export class Responses extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/responses', ResponseListResponsesOpenAICursorPage, {
+ return this._client.getAPIList('/v1/responses', ResponseListResponsesOpenAICursorPage, {
query,
...options,
});
}
/**
- * Delete an OpenAI response by its ID.
+ * Delete a response.
*/
delete(responseId: string, options?: Core.RequestOptions): Core.APIPromise<ResponseDeleteResponse> {
- return this._client.delete(`/v1/openai/v1/responses/${responseId}`, options);
+ return this._client.delete(`/v1/responses/${responseId}`, options);
}
}
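The overloads above follow the standard Stainless streaming pattern: `stream: true` switches the resolved type from `ResponseObject` to `Stream<ResponseObjectStream>`. A sketch of both modes; the `input` body field and the `delta` payload on the text-delta event are assumed from the OpenAI-compatible shapes rather than shown in this hunk:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // Non-streaming: resolves to a ResponseObject.
  const response = await client.responses.create({
    model: 'llama3.2:3b',
    input: 'Write a haiku about type safety.',
  });
  console.log(response.output);

  // Streaming: resolves to Stream<ResponseObjectStream>; discriminate on `type`.
  const stream = await client.responses.create({
    model: 'llama3.2:3b',
    input: 'Write a haiku about type safety.',
    stream: true,
  });
  for await (const event of stream) {
    if (event.type === 'response.output_text.delta') process.stdout.write(event.delta);
    if (event.type === 'response.completed') console.log('\ndone');
  }
}

main();
```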
@@ -110,6 +108,7 @@ export interface ResponseObject {
| ResponseObject.OpenAIResponseOutputMessageFunctionToolCall
| ResponseObject.OpenAIResponseOutputMessageMcpCall
| ResponseObject.OpenAIResponseOutputMessageMcpListTools
+ | ResponseObject.OpenAIResponseMcpApprovalRequest
>;
/**
@@ -142,6 +141,16 @@ export interface ResponseObject {
*/
temperature?: number;
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseObject.OpenAIResponseInputToolWebSearch
+ | ResponseObject.OpenAIResponseInputToolFileSearch
+ | ResponseObject.OpenAIResponseInputToolFunction
+ | ResponseObject.OpenAIResponseToolMcp
+ >;
+
/**
* (Optional) Nucleus sampling parameter used for generation
*/
@@ -153,9 +162,9 @@ export interface ResponseObject {
truncation?: string;
/**
- * (Optional) User identifier associated with the request
+ * (Optional) Token usage information for the response
*/
- user?: string;
+ usage?: ResponseObject.Usage;
}
export namespace ResponseObject {
@@ -515,6 +524,21 @@ export namespace ResponseObject {
}
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* Text formatting configuration for the response
*/
@@ -573,6 +597,182 @@ export namespace ResponseObject {
*/
message: string;
}
+
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
+ search_context_size?: string;
+ }
+
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
+ type: 'file_search';
+
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array<string>;
+
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
+
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
+
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
+ ranker?: string;
+
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
+ score_threshold?: number;
+ }
+ }
+
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
+ name: string;
+
+ /**
+ * Tool type identifier, always "function"
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Description of what the function does
+ */
+ description?: string;
+
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
+ strict?: boolean;
+ }
+
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+ */
+ export interface OpenAIResponseToolMcp {
+ /**
+ * Label to identify this MCP server
+ */
+ server_label: string;
+
+ /**
+ * Tool type identifier, always "mcp"
+ */
+ type: 'mcp';
+
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
+ allowed_tools?: Array<string> | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+
+ export namespace OpenAIResponseToolMcp {
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array<string>;
+ }
+ }
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the input
+ */
+ input_tokens: number;
+
+ /**
+ * Number of tokens in the output
+ */
+ output_tokens: number;
+
+ /**
+ * Total tokens used (input + output)
+ */
+ total_tokens: number;
+
+ /**
+ * Detailed breakdown of input token usage
+ */
+ input_tokens_details?: Usage.InputTokensDetails;
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+ }
}
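Note that `usage` replaces the old `user` field on `ResponseObject`. Since the detail objects and their fields are all optional, downstream accounting should guard every read; a small sketch (the deep import path is assumed from the package's resources layout):

```ts
import type { ResponseObject } from 'llama-stack-client/resources/responses/responses';

// Summarize token usage from a completed response. Only the three top-level
// counts are required; both detail breakdowns may be absent.
function summarizeUsage(response: ResponseObject): string {
  const usage = response.usage;
  if (!usage) return 'no usage reported';
  const cached = usage.input_tokens_details?.cached_tokens ?? 0;
  const reasoning = usage.output_tokens_details?.reasoning_tokens ?? 0;
  return (
    `in=${usage.input_tokens} (cached=${cached}) ` +
    `out=${usage.output_tokens} (reasoning=${reasoning}) total=${usage.total_tokens}`
  );
}
```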
/**
@@ -580,6 +780,7 @@ export namespace ResponseObject {
*/
export type ResponseObjectStream =
| ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseInProgress
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta
@@ -599,6 +800,20 @@ export type ResponseObjectStream =
| ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted
| ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded
| ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallSearching
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallCompleted
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseIncomplete
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFailed
| ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted;
export namespace ResponseObjectStream {
@@ -607,7 +822,7 @@ export namespace ResponseObjectStream {
*/
export interface OpenAIResponseObjectStreamResponseCreated {
/**
- * The newly created response object
+ * The response object that was created
*/
response: ResponsesAPI.ResponseObject;
@@ -617,6 +832,26 @@ export namespace ResponseObjectStream {
type: 'response.created';
}
+ /**
+ * Streaming event indicating the response remains in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseInProgress {
+ /**
+ * Current response state while in progress
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.in_progress"
+ */
+ type: 'response.in_progress';
+ }
+
/**
* Streaming event for when a new output item is added to the response.
*/
@@ -630,7 +865,8 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools;
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMcpApprovalRequest;
/**
* Index position of this item in the output list
@@ -1009,6 +1245,21 @@ export namespace ResponseObjectStream {
description?: string;
}
}
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
}
/**
@@ -1024,7 +1275,8 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFileSearchToolCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFunctionToolCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall
- | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools;
+ | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools
+ | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMcpApprovalRequest;
/**
* Index position of this item in the output list
@@ -1403,6 +1655,21 @@ export namespace ResponseObjectStream {
description?: string;
}
}
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
}
/**
@@ -1696,17 +1963,28 @@ export namespace ResponseObjectStream {
* Streaming event for when a new content part is added to a response item.
*/
export interface OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
/**
* Unique identifier of the output item containing this content part
*/
item_id: string;
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
/**
* The content part that was added
*/
part:
| OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal;
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartReasoningText;
/**
* Unique identifier of the response containing this content
@@ -1725,39 +2003,830 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Text content within a streamed response part.
+ */
export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
text: string;
+ /**
+ * Content part type identifier, always "output_text"
+ */
type: 'output_text';
- }
- export interface OpenAIResponseContentPartRefusal {
- refusal: string;
-
- type: 'refusal';
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
}
- }
- /**
- * Streaming event for when a content part is completed.
- */
- export interface OpenAIResponseObjectStreamResponseContentPartDone {
- /**
- * Unique identifier of the output item containing this content part
- */
- item_id: string;
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
- /**
- * The completed content part
- */
- part:
- | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal;
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
+ }
+
+ /**
+ * Streaming event for when a content part is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the output item containing this content part
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
+ /**
+ * The completed content part
+ */
+ part:
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartReasoningText;
+
+ /**
+ * Unique identifier of the response containing this content
+ */
+ response_id: string;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.content_part.done"
+ */
+ type: 'response.content_part.done';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Text content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "output_text"
+ */
+ type: 'output_text';
+
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
+ }
+
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
+ }
+
+ /**
+ * Streaming event for incremental reasoning text updates.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningTextDelta {
+ /**
+ * Index position of the reasoning content part
+ */
+ content_index: number;
+
+ /**
+ * Incremental reasoning text being added
+ */
+ delta: string;
+
+ /**
+ * Unique identifier of the output item being updated
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_text.delta"
+ */
+ type: 'response.reasoning_text.delta';
+ }
+
+ /**
+ * Streaming event for when reasoning text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningTextDone {
+ /**
+ * Index position of the reasoning content part
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the completed output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Final complete reasoning text
+ */
+ text: string;
+
+ /**
+ * Event type identifier, always "response.reasoning_text.done"
+ */
+ type: 'response.reasoning_text.done';
+ }
+
+ /**
+ * Streaming event for when a new reasoning summary part is added.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * The summary part that was added
+ */
+ part: OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded.Part;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_part.added"
+ */
+ type: 'response.reasoning_summary_part.added';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
+ /**
+ * The summary part that was added
+ */
+ export interface Part {
+ /**
+ * Summary text
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "summary_text"
+ */
+ type: 'summary_text';
+ }
+ }
+
+ /**
+ * Streaming event for when a reasoning summary part is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartDone {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * The completed summary part
+ */
+ part: OpenAIResponseObjectStreamResponseReasoningSummaryPartDone.Part;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_part.done"
+ */
+ type: 'response.reasoning_summary_part.done';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartDone {
+ /**
+ * The completed summary part
+ */
+ export interface Part {
+ /**
+ * Summary text
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "summary_text"
+ */
+ type: 'summary_text';
+ }
+ }
+
+ /**
+ * Streaming event for incremental reasoning summary text updates.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta {
+ /**
+ * Incremental summary text being added
+ */
+ delta: string;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_text.delta"
+ */
+ type: 'response.reasoning_summary_text.delta';
+ }
+
+ /**
+ * Streaming event for when reasoning summary text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDone {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Final complete summary text
+ */
+ text: string;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_text.done"
+ */
+ type: 'response.reasoning_summary_text.done';
+ }
+
+ /**
+ * Streaming event for incremental refusal text updates.
+ */
+ export interface OpenAIResponseObjectStreamResponseRefusalDelta {
+ /**
+ * Index position of the content part
+ */
+ content_index: number;
+
+ /**
+ * Incremental refusal text being added
+ */
+ delta: string;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.refusal.delta"
+ */
+ type: 'response.refusal.delta';
+ }
+
+ /**
+ * Streaming event for when refusal text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseRefusalDone {
+ /**
+ * Index position of the content part
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Final complete refusal text
+ */
+ refusal: string;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.refusal.done"
+ */
+ type: 'response.refusal.done';
+ }
+
+ /**
+ * Streaming event for when an annotation is added to output text.
+ */
+ export interface OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded {
+ /**
+ * The annotation object being added
+ */
+ annotation:
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFilePath;
+
+ /**
+ * Index of the annotation within the content part
+ */
+ annotation_index: number;
+
+ /**
+ * Index position of the content part within the output item
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the item to which the annotation is being added
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item in the response's output array
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.output_text.annotation.added"
+ */
+ type: 'response.output_text.annotation.added';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Streaming event for file search calls in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallInProgress {
+ /**
+ * Unique identifier of the file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.in_progress"
+ */
+ type: 'response.file_search_call.in_progress';
+ }
+
+ /**
+ * Streaming event for file search currently searching.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallSearching {
+ /**
+ * Unique identifier of the file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.searching"
+ */
+ type: 'response.file_search_call.searching';
+ }
+
+ /**
+ * Streaming event for completed file search calls.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallCompleted {
+ /**
+ * Unique identifier of the completed file search call
+ */
+ item_id: string;
/**
- * Unique identifier of the response containing this content
+ * Index position of the item in the output list
*/
- response_id: string;
+ output_index: number;
/**
* Sequential number for ordering streaming events
@@ -1765,23 +2834,49 @@ export namespace ResponseObjectStream {
sequence_number: number;
/**
- * Event type identifier, always "response.content_part.done"
+ * Event type identifier, always "response.file_search_call.completed"
*/
- type: 'response.content_part.done';
+ type: 'response.file_search_call.completed';
}
- export namespace OpenAIResponseObjectStreamResponseContentPartDone {
- export interface OpenAIResponseContentPartOutputText {
- text: string;
+ /**
+ * Streaming event emitted when a response ends in an incomplete state.
+ */
+ export interface OpenAIResponseObjectStreamResponseIncomplete {
+ /**
+ * Response object describing the incomplete state
+ */
+ response: ResponsesAPI.ResponseObject;
- type: 'output_text';
- }
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
- export interface OpenAIResponseContentPartRefusal {
- refusal: string;
+ /**
+ * Event type identifier, always "response.incomplete"
+ */
+ type: 'response.incomplete';
+ }
- type: 'refusal';
- }
+ /**
+ * Streaming event emitted when a response fails.
+ */
+ export interface OpenAIResponseObjectStreamResponseFailed {
+ /**
+ * Response object describing the failure
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.failed"
+ */
+ type: 'response.failed';
}
/**
@@ -1789,7 +2884,7 @@ export namespace ResponseObjectStream {
*/
export interface OpenAIResponseObjectStreamResponseCompleted {
/**
- * The completed response object
+ * Completed response object
*/
response: ResponsesAPI.ResponseObject;
@@ -1822,6 +2917,8 @@ export interface ResponseListResponse {
| ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
| ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
| ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ResponseListResponse.OpenAIResponseMcpApprovalRequest
+ | ResponseListResponse.OpenAIResponseMcpApprovalResponse
| ResponseListResponse.OpenAIResponseMessage
>;
@@ -1845,6 +2942,7 @@ export interface ResponseListResponse {
| ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
| ResponseListResponse.OpenAIResponseOutputMessageMcpCall
| ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
+ | ResponseListResponse.OpenAIResponseMcpApprovalRequest
>;
/**
@@ -1877,6 +2975,16 @@ export interface ResponseListResponse {
*/
temperature?: number;
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseListResponse.OpenAIResponseInputToolWebSearch
+ | ResponseListResponse.OpenAIResponseInputToolFileSearch
+ | ResponseListResponse.OpenAIResponseInputToolFunction
+ | ResponseListResponse.OpenAIResponseToolMcp
+ >;
+
/**
* (Optional) Nucleus sampling parameter used for generation
*/
@@ -1888,9 +2996,9 @@ export interface ResponseListResponse {
truncation?: string;
/**
- * (Optional) User identifier associated with the request
+ * (Optional) Token usage information for the response
*/
- user?: string;
+ usage?: ResponseListResponse.Usage;
}
export namespace ResponseListResponse {
@@ -2027,6 +3135,36 @@ export namespace ResponseListResponse {
status?: string;
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
@@ -2535,6 +3673,21 @@ export namespace ResponseListResponse {
}
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* Text formatting configuration for the response
*/
@@ -2593,6 +3746,182 @@ export namespace ResponseListResponse {
*/
message: string;
}
+
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
+ search_context_size?: string;
+ }
+
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
+ type: 'file_search';
+
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array<string>;
+
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
+
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
+
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
+ ranker?: string;
+
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
+ score_threshold?: number;
+ }
+ }
+
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
+ name: string;
+
+ /**
+ * Tool type identifier, always "function"
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Description of what the function does
+ */
+ description?: string;
+
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
+ strict?: boolean;
+ }
+
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+ */
+ export interface OpenAIResponseToolMcp {
+ /**
+ * Label to identify this MCP server
+ */
+ server_label: string;
+
+ /**
+ * Tool type identifier, always "mcp"
+ */
+ type: 'mcp';
+
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
+ allowed_tools?: Array<string> | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+
+ export namespace OpenAIResponseToolMcp {
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array<string>;
+ }
+ }
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the input
+ */
+ input_tokens: number;
+
+ /**
+ * Number of tokens in the output
+ */
+ output_tokens: number;
+
+ /**
+ * Total tokens used (input + output)
+ */
+ total_tokens: number;
+
+ /**
+ * Detailed breakdown of input token usage
+ */
+ input_tokens_details?: Usage.InputTokensDetails;
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+ }
}
/**
@@ -2628,6 +3957,8 @@ export interface ResponseCreateParamsBase {
| ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
| ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall
| ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput
+ | ResponseCreateParams.OpenAIResponseMcpApprovalRequest
+ | ResponseCreateParams.OpenAIResponseMcpApprovalResponse
| ResponseCreateParams.OpenAIResponseMessage
>;
@@ -2636,6 +3967,13 @@ export interface ResponseCreateParamsBase {
*/
model: string;
+ /**
+ * (Optional) The ID of a conversation to add the response to. Must begin with
+ * 'conv\_'. Input and output messages will be automatically added to the
+ * conversation.
+ */
+ conversation?: string;
+
/**
* (Optional) Additional fields to include in the response.
*/
@@ -2805,6 +4143,36 @@ export namespace ResponseCreateParams {
status?: string;
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
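The `mcp_approval_request` and `mcp_approval_response` input items added above enable a human-in-the-loop round trip for MCP tool calls. The following is a hedged sketch only, assuming a configured `client`, an arbitrary registered model id, and `previous_response_id` chaining on this endpoint:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

const first = await client.responses.create({
  model: 'my-model', // assumption: any model registered on the stack
  input: 'Use the MCP wiki tool to look up Llama Stack',
});

for (const item of first.output) {
  if (item.type === 'mcp_approval_request') {
    // Answer the approval request with an mcp_approval_response item
    // that references the request id, then let the model continue.
    await client.responses.create({
      model: 'my-model',
      previous_response_id: first.id, // assumed chaining parameter
      input: [{ type: 'mcp_approval_response', approval_request_id: item.id, approve: true }],
    });
  }
}
```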
diff --git a/src/resources/routes.ts b/src/resources/routes.ts
index 98d5dfe..85e8496 100644
--- a/src/resources/routes.ts
+++ b/src/resources/routes.ts
@@ -6,7 +6,8 @@ import * as InspectAPI from './inspect';
export class Routes extends APIResource {
/**
- * List all available API routes with their methods and implementing providers.
+ * List routes. List all available API routes with their methods and implementing
+ * providers.
*/
list(options?: Core.RequestOptions): Core.APIPromise<RouteListResponse> {
return (
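A minimal usage sketch for the reworded route listing, assuming a configured `client`; `list` unwraps the `{ data: ... }` envelope and resolves to the route array:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Resolves to the unwrapped array of route descriptors.
const routes = await client.routes.list();
for (const route of routes) {
  console.log(route);
}
```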
diff --git a/src/resources/safety.ts b/src/resources/safety.ts
index d41b2c7..902aa14 100644
--- a/src/resources/safety.ts
+++ b/src/resources/safety.ts
@@ -6,7 +6,7 @@ import * as Shared from './shared';
export class Safety extends APIResource {
/**
- * Run a shield.
+ * Run shield. Run a safety shield against the provided messages.
*/
runShield(body: SafetyRunShieldParams, options?: Core.RequestOptions): Core.APIPromise<RunShieldResponse> {
return this._client.post('/v1/safety/run-shield', { body, ...options });
@@ -27,7 +27,13 @@ export interface SafetyRunShieldParams {
/**
* The messages to run the shield on.
*/
- messages: Array<Shared.Message>;
+ messages: Array<
+ | SafetyRunShieldParams.OpenAIUserMessageParam
+ | SafetyRunShieldParams.OpenAISystemMessageParam
+ | SafetyRunShieldParams.OpenAIAssistantMessageParam
+ | SafetyRunShieldParams.OpenAIToolMessageParam
+ | SafetyRunShieldParams.OpenAIDeveloperMessageParam
+ >;
/**
* The parameters of the shield.
@@ -40,6 +46,298 @@ export interface SafetyRunShieldParams {
shield_id: string;
}
+export namespace SafetyRunShieldParams {
+ /**
+ * A message from the user in an OpenAI-compatible chat completion request.
+ */
+ export interface OpenAIUserMessageParam {
+ /**
+ * The content of the message, which can include text and other media
+ */
+ content:
+ | string
+ | Array<
+ | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
+ | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
+ >;
+
+ /**
+ * Must be "user" to identify this as a user message
+ */
+ role: 'user';
+
+ /**
+ * (Optional) The name of the user message participant.
+ */
+ name?: string;
+ }
+
+ export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+ /**
+ * Must be "image_url" to identify this as image content
+ */
+ type: 'image_url';
+ }
+
+ export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
+ url: string;
+
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
+ detail?: string;
+ }
+ }
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
+ }
+
+ /**
+ * A system message providing instructions or context to the model.
+ */
+ export interface OpenAISystemMessageParam {
+ /**
+ * The content of the "system prompt". If multiple system messages are provided,
+ * they are concatenated. The underlying Llama Stack code may also add other system
+ * messages (for example, for formatting tool definitions).
+ */
+ content: string | Array<OpenAISystemMessageParam.UnionMember1>;
+
+ /**
+ * Must be "system" to identify this as a system message
+ */
+ role: 'system';
+
+ /**
+ * (Optional) The name of the system message participant.
+ */
+ name?: string;
+ }
+
+ export namespace OpenAISystemMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+ }
+
+ /**
+ * A message containing the model's (assistant) response in an OpenAI-compatible
+ * chat completion request.
+ */
+ export interface OpenAIAssistantMessageParam {
+ /**
+ * Must be "assistant" to identify this as the model's response
+ */
+ role: 'assistant';
+
+ /**
+ * The content of the model's response
+ */
+ content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
+
+ /**
+ * (Optional) The name of the assistant message participant.
+ */
+ name?: string;
+
+ /**
+ * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.
+ */
+ tool_calls?: Array<OpenAIAssistantMessageParam.ToolCall>;
+ }
+
+ export namespace OpenAIAssistantMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
+ export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Function call details
+ */
+ function?: ToolCall.Function;
+
+ /**
+ * (Optional) Index of the tool call in the list
+ */
+ index?: number;
+ }
+
+ export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
+ export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
+ arguments?: string;
+
+ /**
+ * (Optional) Name of the function to call
+ */
+ name?: string;
+ }
+ }
+ }
+
+ /**
+ * A message representing the result of a tool invocation in an OpenAI-compatible
+ * chat completion request.
+ */
+ export interface OpenAIToolMessageParam {
+ /**
+ * The response content from the tool
+ */
+ content: string | Array<OpenAIToolMessageParam.UnionMember1>;
+
+ /**
+ * Must be "tool" to identify this as a tool response
+ */
+ role: 'tool';
+
+ /**
+ * Unique identifier for the tool call this response is for
+ */
+ tool_call_id: string;
+ }
+
+ export namespace OpenAIToolMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+ }
+
+ /**
+ * A message from the developer in an OpenAI-compatible chat completion request.
+ */
+ export interface OpenAIDeveloperMessageParam {
+ /**
+ * The content of the developer message
+ */
+ content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
+
+ /**
+ * Must be "developer" to identify this as a developer message
+ */
+ role: 'developer';
+
+ /**
+ * (Optional) The name of the developer message participant.
+ */
+ name?: string;
+ }
+
+ export namespace OpenAIDeveloperMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+ }
+}
+
export declare namespace Safety {
export { type RunShieldResponse as RunShieldResponse, type SafetyRunShieldParams as SafetyRunShieldParams };
}
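With the message union spelled out inline, run-shield calls are typed end to end. A sketch under the assumption that a shield with the id below is registered on the stack:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

const result = await client.safety.runShield({
  shield_id: 'llama-guard', // assumed shield identifier
  params: {},
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Tell me how to bypass a login page.' },
  ],
});

// RunShieldResponse carries an optional violation when content is flagged.
console.log(result.violation ?? 'no violation');
```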
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 00c767f..7a10e74 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -1,7 +1,5 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import * as Shared from './shared';
-import * as InferenceAPI from './inference';
import * as ToolRuntimeAPI from './tool-runtime/tool-runtime';
/**
@@ -105,36 +103,6 @@ export namespace AgentConfig {
}
}
-/**
- * Response from a batch completion request.
- */
-export interface BatchCompletion {
- /**
- * List of completion responses, one for each input in the batch
- */
- batch: Array<InferenceAPI.CompletionResponse>;
-}
-
-/**
- * Response from a chat completion request.
- */
-export interface ChatCompletionResponse {
- /**
- * The complete response message
- */
- completion_message: CompletionMessage;
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array<InferenceAPI.TokenLogProbs>;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array<Metric>;
-}
-
/**
* A message containing the model's (assistant) response in a chat conversation.
*/
@@ -165,63 +133,6 @@ export interface CompletionMessage {
tool_calls?: Array<ToolCall>;
}
-/**
- * A text content delta for streaming responses.
- */
-export type ContentDelta = ContentDelta.TextDelta | ContentDelta.ImageDelta | ContentDelta.ToolCallDelta;
-
-export namespace ContentDelta {
- /**
- * A text content delta for streaming responses.
- */
- export interface TextDelta {
- /**
- * The incremental text content
- */
- text: string;
-
- /**
- * Discriminator type of the delta. Always "text"
- */
- type: 'text';
- }
-
- /**
- * An image content delta for streaming responses.
- */
- export interface ImageDelta {
- /**
- * The incremental image data as bytes
- */
- image: string;
-
- /**
- * Discriminator type of the delta. Always "image"
- */
- type: 'image';
- }
-
- /**
- * A tool call content delta for streaming responses.
- */
- export interface ToolCallDelta {
- /**
- * Current parsing status of the tool call
- */
- parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded';
-
- /**
- * Either an in-progress tool call string or the final parsed tool call
- */
- tool_call: string | Shared.ToolCall;
-
- /**
- * Discriminator type of the delta. Always "tool_call"
- */
- type: 'tool_call';
- }
-}
-
/**
* A document to be used for document ingestion in the RAG Tool.
*/
@@ -472,26 +383,6 @@ export namespace InterleavedContentItem {
*/
export type Message = UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage;
-/**
- * A metric value included in API responses.
- */
-export interface Metric {
- /**
- * The name of the metric
- */
- metric: string;
-
- /**
- * The numeric value of the metric
- */
- value: number;
-
- /**
- * (Optional) The unit of measurement for the metric value
- */
- unit?: string;
-}
-
/**
* Parameter type for string values.
*/
@@ -634,7 +525,7 @@ export interface QueryConfig {
/**
* Configuration for the query generator.
*/
- query_generator_config: QueryGeneratorConfig;
+ query_generator_config: QueryConfig.DefaultRagQueryGeneratorConfig | QueryConfig.LlmragQueryGeneratorConfig;
/**
* Search mode for retrieval—either "vector", "keyword", or "hybrid". Default
@@ -650,79 +541,70 @@ export interface QueryConfig {
export namespace QueryConfig {
/**
- * Reciprocal Rank Fusion (RRF) ranker configuration.
+ * Configuration for the default RAG query generator.
*/
- export interface RrfRanker {
+ export interface DefaultRagQueryGeneratorConfig {
/**
- * The impact factor for RRF scoring. Higher values give more weight to
- * higher-ranked results. Must be greater than 0
+ * String separator used to join query terms
*/
- impact_factor: number;
+ separator: string;
/**
- * The type of ranker, always "rrf"
+ * Type of query generator, always 'default'
*/
- type: 'rrf';
+ type: 'default';
}
/**
- * Weighted ranker configuration that combines vector and keyword scores.
+ * Configuration for the LLM-based RAG query generator.
*/
- export interface WeightedRanker {
+ export interface LlmragQueryGeneratorConfig {
/**
- * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use
- * vector scores, values in between blend both scores.
+ * Name of the language model to use for query generation
*/
- alpha: number;
+ model: string;
/**
- * The type of ranker, always "weighted"
+ * Template string for formatting the query generation prompt
*/
- type: 'weighted';
- }
-}
+ template: string;
-/**
- * Configuration for the default RAG query generator.
- */
-export type QueryGeneratorConfig =
- | QueryGeneratorConfig.DefaultRagQueryGeneratorConfig
- | QueryGeneratorConfig.LlmragQueryGeneratorConfig;
+ /**
+ * Type of query generator, always 'llm'
+ */
+ type: 'llm';
+ }
-export namespace QueryGeneratorConfig {
/**
- * Configuration for the default RAG query generator.
+ * Reciprocal Rank Fusion (RRF) ranker configuration.
*/
- export interface DefaultRagQueryGeneratorConfig {
+ export interface RrfRanker {
/**
- * String separator used to join query terms
+ * The impact factor for RRF scoring. Higher values give more weight to
+ * higher-ranked results. Must be greater than 0
*/
- separator: string;
+ impact_factor: number;
/**
- * Type of query generator, always 'default'
+ * The type of ranker, always "rrf"
*/
- type: 'default';
+ type: 'rrf';
}
/**
- * Configuration for the LLM-based RAG query generator.
+ * Weighted ranker configuration that combines vector and keyword scores.
*/
- export interface LlmragQueryGeneratorConfig {
- /**
- * Name of the language model to use for query generation
- */
- model: string;
-
+ export interface WeightedRanker {
/**
- * Template string for formatting the query generation prompt
+ * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use
+ * vector scores, values in between blend both scores.
*/
- template: string;
+ alpha: number;
/**
- * Type of query generator, always 'llm'
+ * The type of ranker, always "weighted"
*/
- type: 'llm';
+ type: 'weighted';
}
}
@@ -914,33 +796,11 @@ export interface SystemMessage {
}
export interface ToolCall {
- arguments:
- | string
- | {
- [key: string]:
- | string
- | number
- | boolean
- | Array<string | number | boolean | null>
- | { [key: string]: string | number | boolean | null }
- | null;
- };
+ arguments: string;
call_id: string;
tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- arguments_json?: string;
-}
-
-export interface ToolParamDefinition {
- param_type: string;
-
- default?: boolean | number | string | Array<unknown> | unknown | null;
-
- description?: string;
-
- required?: boolean;
}
/**
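The `ToolCall` change above is the sharpest edge in this file: `arguments` is now always a JSON-encoded string, and both the structured union and `arguments_json` are gone. Callers that previously branched on the value's type now parse unconditionally; a minimal sketch using a structural type:

```ts
// Matches the new ToolCall shape for the field we care about.
function toolArguments(call: { arguments: string }): Record<string, unknown> {
  // Assumes the server emits valid JSON; add error handling as needed.
  return JSON.parse(call.arguments) as Record<string, unknown>;
}

// Example: toolArguments({ arguments: '{"query":"llama"}' }) => { query: 'llama' }
```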
diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts
index 8064a72..5c711ae 100644
--- a/src/resources/telemetry.ts
+++ b/src/resources/telemetry.ts
@@ -12,7 +12,7 @@ export class Telemetry extends APIResource {
spanId: string,
options?: Core.RequestOptions,
): Core.APIPromise<TelemetryGetSpanResponse> {
- return this._client.get(`/v1/telemetry/traces/${traceId}/spans/${spanId}`, options);
+ return this._client.get(`/v1alpha/telemetry/traces/${traceId}/spans/${spanId}`, options);
}
/**
@@ -24,7 +24,7 @@ export class Telemetry extends APIResource {
options?: Core.RequestOptions,
): Core.APIPromise<TelemetryGetSpanTreeResponse> {
return (
- this._client.post(`/v1/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{
+ this._client.post(`/v1alpha/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{
data: TelemetryGetSpanTreeResponse;
}>
)._thenUnwrap((obj) => obj.data);
@@ -34,18 +34,7 @@ export class Telemetry extends APIResource {
* Get a trace by its ID.
*/
getTrace(traceId: string, options?: Core.RequestOptions): Core.APIPromise<Trace> {
- return this._client.get(`/v1/telemetry/traces/${traceId}`, options);
- }
-
- /**
- * Log an event.
- */
- logEvent(body: TelemetryLogEventParams, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.post('/v1/telemetry/events', {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
+ return this._client.get(`/v1alpha/telemetry/traces/${traceId}`, options);
}
/**
@@ -57,7 +46,7 @@ export class Telemetry extends APIResource {
options?: Core.RequestOptions,
): Core.APIPromise<TelemetryQueryMetricsResponse> {
return (
- this._client.post(`/v1/telemetry/metrics/${metricName}`, { body, ...options }) as Core.APIPromise<{
+ this._client.post(`/v1alpha/telemetry/metrics/${metricName}`, { body, ...options }) as Core.APIPromise<{
data: TelemetryQueryMetricsResponse;
}>
)._thenUnwrap((obj) => obj.data);
@@ -71,7 +60,7 @@ export class Telemetry extends APIResource {
options?: Core.RequestOptions,
): Core.APIPromise<TelemetryQuerySpansResponse> {
return (
- this._client.post('/v1/telemetry/spans', { body, ...options }) as Core.APIPromise<{
+ this._client.post('/v1alpha/telemetry/spans', { body, ...options }) as Core.APIPromise<{
data: TelemetryQuerySpansResponse;
}>
)._thenUnwrap((obj) => obj.data);
@@ -85,7 +74,7 @@ export class Telemetry extends APIResource {
options?: Core.RequestOptions,
): Core.APIPromise<TelemetryQueryTracesResponse> {
return (
- this._client.post('/v1/telemetry/traces', { body, ...options }) as Core.APIPromise<{
+ this._client.post('/v1alpha/telemetry/traces', { body, ...options }) as Core.APIPromise<{
data: TelemetryQueryTracesResponse;
}>
)._thenUnwrap((obj) => obj.data);
@@ -98,7 +87,7 @@ export class Telemetry extends APIResource {
body: TelemetrySaveSpansToDatasetParams,
options?: Core.RequestOptions,
): Core.APIPromise<void> {
- return this._client.post('/v1/telemetry/spans/export', {
+ return this._client.post('/v1alpha/telemetry/spans/export', {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
@@ -542,18 +531,6 @@ export interface TelemetryGetSpanTreeParams {
max_depth?: number;
}
-export interface TelemetryLogEventParams {
- /**
- * The event to log.
- */
- event: Event;
-
- /**
- * The time to live of the event.
- */
- ttl_seconds: number;
-}
-
export interface TelemetryQueryMetricsParams {
/**
* The type of query to perform.
@@ -677,7 +654,6 @@ export declare namespace Telemetry {
type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse,
type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse,
type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams as TelemetryLogEventParams,
type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams,
type TelemetryQuerySpansParams as TelemetryQuerySpansParams,
type TelemetryQueryTracesParams as TelemetryQueryTracesParams,
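Only the server paths move to `/v1alpha` here; the client-side call shapes are unchanged, while `logEvent` is removed outright. A sketch of a call that keeps working after the upgrade (the trace id is a placeholder):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Same method as before; it now requests /v1alpha/telemetry/traces/{trace_id}.
const trace = await client.telemetry.getTrace('trace-123'); // placeholder id
console.log(trace);
```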
diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts
index ca1a6c8..3324906 100644
--- a/src/resources/tool-runtime/tool-runtime.ts
+++ b/src/resources/tool-runtime/tool-runtime.ts
@@ -57,47 +57,25 @@ export interface ToolDef {
*/
description?: string;
+ /**
+ * (Optional) JSON Schema for tool inputs (MCP inputSchema)
+ */
+ input_schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
/**
* (Optional) Additional metadata about the tool
*/
metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
- * (Optional) List of parameters this tool accepts
+ * (Optional) JSON Schema for tool outputs (MCP outputSchema)
*/
- parameters?: Array<ToolDef.Parameter>;
-}
+ output_schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-export namespace ToolDef {
/**
- * Parameter definition for a tool.
+ * (Optional) ID of the tool group this tool belongs to
*/
- export interface Parameter {
- /**
- * Human-readable description of what the parameter does
- */
- description: string;
-
- /**
- * Name of the parameter
- */
- name: string;
-
- /**
- * Type of the parameter (e.g., string, integer)
- */
- parameter_type: string;
-
- /**
- * Whether this parameter is required for tool invocation
- */
- required: boolean;
-
- /**
- * (Optional) Default value for the parameter if not provided
- */
- default?: boolean | number | string | Array<unknown> | unknown | null;
- }
+ toolgroup_id?: string;
}
/**
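With `parameters` replaced by JSON Schema fields, a tool definition now carries its contract in `input_schema` (and optionally `output_schema`). A hedged sketch of the new shape, using a plain object literal for illustration:

```ts
// Illustrative only; the literal satisfies the new schema-based ToolDef shape.
const weatherTool = {
  name: 'get_weather',
  description: 'Look up the current weather for a city',
  input_schema: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  },
};
```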
diff --git a/src/resources/tools.ts b/src/resources/tools.ts
index ba35360..668d2ce 100644
--- a/src/resources/tools.ts
+++ b/src/resources/tools.ts
@@ -3,6 +3,7 @@
import { APIResource } from '../resource';
import { isRequestOptions } from '../core';
import * as Core from '../core';
+import * as ToolRuntimeAPI from './tool-runtime/tool-runtime';
export class Tools extends APIResource {
/**
@@ -25,93 +26,15 @@ export class Tools extends APIResource {
/**
* Get a tool by its name.
*/
- get(toolName: string, options?: Core.RequestOptions): Core.APIPromise<Tool> {
+ get(toolName: string, options?: Core.RequestOptions): Core.APIPromise<ToolRuntimeAPI.ToolDef> {
return this._client.get(`/v1/tools/${toolName}`, options);
}
}
/**
- * Response containing a list of tools.
+ * List of tool definitions
*/
-export interface ListToolsResponse {
- /**
- * List of tools
- */
- data: ToolListResponse;
-}
-
-/**
- * A tool that can be invoked by agents.
- */
-export interface Tool {
- /**
- * Human-readable description of what the tool does
- */
- description: string;
-
- identifier: string;
-
- /**
- * List of parameters this tool accepts
- */
- parameters: Array<Tool.Parameter>;
-
- provider_id: string;
-
- /**
- * ID of the tool group this tool belongs to
- */
- toolgroup_id: string;
-
- /**
- * Type of resource, always 'tool'
- */
- type: 'tool';
-
- /**
- * (Optional) Additional metadata about the tool
- */
- metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- provider_resource_id?: string;
-}
-
-export namespace Tool {
- /**
- * Parameter definition for a tool.
- */
- export interface Parameter {
- /**
- * Human-readable description of what the parameter does
- */
- description: string;
-
- /**
- * Name of the parameter
- */
- name: string;
-
- /**
- * Type of the parameter (e.g., string, integer)
- */
- parameter_type: string;
-
- /**
- * Whether this parameter is required for tool invocation
- */
- required: boolean;
-
- /**
- * (Optional) Default value for the parameter if not provided
- */
- default?: boolean | number | string | Array<unknown> | unknown | null;
- }
-}
-
-/**
- * List of tools
- */
-export type ToolListResponse = Array<Tool>;
+export type ToolListResponse = Array<ToolRuntimeAPI.ToolDef>;
export interface ToolListParams {
/**
@@ -121,10 +44,5 @@ export interface ToolListParams {
}
export declare namespace Tools {
- export {
- type ListToolsResponse as ListToolsResponse,
- type Tool as Tool,
- type ToolListResponse as ToolListResponse,
- type ToolListParams as ToolListParams,
- };
+ export { type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams };
}
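`tools.list` and `tools.get` now return `ToolDef` records directly rather than the removed `Tool` resource, so the schema fields above apply here too. A sketch, where the toolgroup id is an assumption:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

const tools = await client.tools.list({ toolgroup_id: 'builtin::websearch' }); // assumed group
for (const tool of tools) {
  console.log(tool.name, tool.input_schema);
}
```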
diff --git a/src/resources/vector-dbs.ts b/src/resources/vector-dbs.ts
deleted file mode 100644
index 3004227..0000000
--- a/src/resources/vector-dbs.ts
+++ /dev/null
@@ -1,185 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import * as Core from '../core';
-
-export class VectorDBs extends APIResource {
- /**
- * Get a vector database by its identifier.
- */
- retrieve(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise<VectorDBRetrieveResponse> {
- return this._client.get(`/v1/vector-dbs/${vectorDBId}`, options);
- }
-
- /**
- * List all vector databases.
- */
- list(options?: Core.RequestOptions): Core.APIPromise<VectorDBListResponse> {
- return (
- this._client.get('/v1/vector-dbs', options) as Core.APIPromise<{ data: VectorDBListResponse }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Register a vector database.
- */
- register(
- body: VectorDBRegisterParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<VectorDBRegisterResponse> {
- return this._client.post('/v1/vector-dbs', { body, ...options });
- }
-
- /**
- * Unregister a vector database.
- */
- unregister(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/vector-dbs/${vectorDBId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * Response from listing vector databases.
- */
-export interface ListVectorDBsResponse {
- /**
- * List of vector databases
- */
- data: VectorDBListResponse;
-}
-
-/**
- * Vector database resource for storing and querying vector embeddings.
- */
-export interface VectorDBRetrieveResponse {
- /**
- * Dimension of the embedding vectors
- */
- embedding_dimension: number;
-
- /**
- * Name of the embedding model to use for vector generation
- */
- embedding_model: string;
-
- identifier: string;
-
- provider_id: string;
-
- /**
- * Type of resource, always 'vector_db' for vector databases
- */
- type: 'vector_db';
-
- provider_resource_id?: string;
-
- vector_db_name?: string;
-}
-
-/**
- * List of vector databases
- */
-export type VectorDBListResponse = Array<VectorDBListResponse.VectorDBListResponseItem>;
-
-export namespace VectorDBListResponse {
- /**
- * Vector database resource for storing and querying vector embeddings.
- */
- export interface VectorDBListResponseItem {
- /**
- * Dimension of the embedding vectors
- */
- embedding_dimension: number;
-
- /**
- * Name of the embedding model to use for vector generation
- */
- embedding_model: string;
-
- identifier: string;
-
- provider_id: string;
-
- /**
- * Type of resource, always 'vector_db' for vector databases
- */
- type: 'vector_db';
-
- provider_resource_id?: string;
-
- vector_db_name?: string;
- }
-}
-
-/**
- * Vector database resource for storing and querying vector embeddings.
- */
-export interface VectorDBRegisterResponse {
- /**
- * Dimension of the embedding vectors
- */
- embedding_dimension: number;
-
- /**
- * Name of the embedding model to use for vector generation
- */
- embedding_model: string;
-
- identifier: string;
-
- provider_id: string;
-
- /**
- * Type of resource, always 'vector_db' for vector databases
- */
- type: 'vector_db';
-
- provider_resource_id?: string;
-
- vector_db_name?: string;
-}
-
-export interface VectorDBRegisterParams {
- /**
- * The embedding model to use.
- */
- embedding_model: string;
-
- /**
- * The identifier of the vector database to register.
- */
- vector_db_id: string;
-
- /**
- * The dimension of the embedding model.
- */
- embedding_dimension?: number;
-
- /**
- * The identifier of the provider.
- */
- provider_id?: string;
-
- /**
- * The identifier of the vector database in the provider.
- */
- provider_vector_db_id?: string;
-
- /**
- * The name of the vector database.
- */
- vector_db_name?: string;
-}
-
-export declare namespace VectorDBs {
- export {
- type ListVectorDBsResponse as ListVectorDBsResponse,
- type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
- type VectorDBListResponse as VectorDBListResponse,
- type VectorDBRegisterResponse as VectorDBRegisterResponse,
- type VectorDBRegisterParams as VectorDBRegisterParams,
- };
-}
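
The vector-dbs resource is deleted outright rather than renamed; the OpenAI-compatible vector_stores resource extended below is the closest surviving surface. A hedged migration sketch; the parameter mapping is an assumption, not a documented one-to-one equivalence:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function migrate() {
  // Before (removed in this diff):
  //   await client.vectorDBs.register({ embedding_model: 'all-MiniLM-L6-v2', vector_db_id: 'my-db' });
  // After (sketch): create an OpenAI-compatible vector store instead.
  const store = await client.vectorStores.create({ name: 'my-db' });
  console.log(store.id);
}

migrate();
```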
diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts
new file mode 100644
index 0000000..75085eb
--- /dev/null
+++ b/src/resources/vector-stores/file-batches.ts
@@ -0,0 +1,264 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as FilesAPI from './files';
+import { VectorStoreFilesOpenAICursorPage } from './files';
+import { type OpenAICursorPageParams } from '../../pagination';
+
+export class FileBatches extends APIResource {
+ /**
+ * Create a vector store file batch. Generate an OpenAI-compatible vector store
+ * file batch for the given vector store.
+ */
+ create(
+ vectorStoreId: string,
+ body: FileBatchCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<VectorStoreFileBatches> {
+ return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches`, { body, ...options });
+ }
+
+ /**
+ * Retrieve a vector store file batch.
+ */
+ retrieve(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<VectorStoreFileBatches> {
+ return this._client.get(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}`, options);
+ }
+
+ /**
+ * Cancels a vector store file batch.
+ */
+ cancel(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<VectorStoreFileBatches> {
+ return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, options);
+ }
+
+ /**
+ * Returns a list of vector store files in a batch.
+ */
+ listFiles(
+ vectorStoreId: string,
+ batchId: string,
+ query?: FileBatchListFilesParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<VectorStoreFilesOpenAICursorPage, FilesAPI.VectorStoreFile>;
+ listFiles(
+ vectorStoreId: string,
+ batchId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<VectorStoreFilesOpenAICursorPage, FilesAPI.VectorStoreFile>;
+ listFiles(
+ vectorStoreId: string,
+ batchId: string,
+ query: FileBatchListFilesParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<VectorStoreFilesOpenAICursorPage, FilesAPI.VectorStoreFile> {
+ if (isRequestOptions(query)) {
+ return this.listFiles(vectorStoreId, batchId, {}, query);
+ }
+ return this._client.getAPIList(
+ `/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`,
+ VectorStoreFilesOpenAICursorPage,
+ { query, ...options },
+ );
+ }
+}
+
+/**
+ * Response from listing files in a vector store file batch.
+ */
+export interface ListVectorStoreFilesInBatchResponse {
+ /**
+ * List of vector store file objects in the batch
+ */
+ data: Array<FilesAPI.VectorStoreFile>;
+
+ /**
+ * Whether there are more files available beyond this page
+ */
+ has_more: boolean;
+
+ /**
+ * Object type identifier, always "list"
+ */
+ object: string;
+
+ /**
+ * (Optional) ID of the first file in the list for pagination
+ */
+ first_id?: string;
+
+ /**
+ * (Optional) ID of the last file in the list for pagination
+ */
+ last_id?: string;
+}
+
+/**
+ * OpenAI Vector Store File Batch object.
+ */
+export interface VectorStoreFileBatches {
+ /**
+ * Unique identifier for the file batch
+ */
+ id: string;
+
+ /**
+ * Timestamp when the file batch was created
+ */
+ created_at: number;
+
+ /**
+ * File processing status counts for the batch
+ */
+ file_counts: VectorStoreFileBatches.FileCounts;
+
+ /**
+ * Object type identifier, always "vector_store.file_batch"
+ */
+ object: string;
+
+ /**
+ * Current processing status of the file batch
+ */
+ status: 'completed' | 'in_progress' | 'cancelled' | 'failed';
+
+ /**
+ * ID of the vector store containing the file batch
+ */
+ vector_store_id: string;
+}
+
+export namespace VectorStoreFileBatches {
+ /**
+ * File processing status counts for the batch
+ */
+ export interface FileCounts {
+ /**
+ * Number of files that had their processing cancelled
+ */
+ cancelled: number;
+
+ /**
+ * Number of files that have been successfully processed
+ */
+ completed: number;
+
+ /**
+ * Number of files that failed to process
+ */
+ failed: number;
+
+ /**
+ * Number of files currently being processed
+ */
+ in_progress: number;
+
+ /**
+ * Total number of files in the vector store
+ */
+ total: number;
+ }
+}
+
+export interface FileBatchCreateParams {
+ /**
+ * A list of File IDs that the vector store should use
+ */
+ file_ids: Array<string>;
+
+ /**
+ * (Optional) Key-value attributes to store with the files
+ */
+ attributes?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) The chunking strategy used to chunk the file(s). Defaults to auto
+ */
+ chunking_strategy?:
+ | FileBatchCreateParams.VectorStoreChunkingStrategyAuto
+ | FileBatchCreateParams.VectorStoreChunkingStrategyStatic;
+}
+
+export namespace FileBatchCreateParams {
+ /**
+ * Automatic chunking strategy for vector store files.
+ */
+ export interface VectorStoreChunkingStrategyAuto {
+ /**
+ * Strategy type, always "auto" for automatic chunking
+ */
+ type: 'auto';
+ }
+
+ /**
+ * Static chunking strategy with configurable parameters.
+ */
+ export interface VectorStoreChunkingStrategyStatic {
+ /**
+ * Configuration parameters for the static chunking strategy
+ */
+ static: VectorStoreChunkingStrategyStatic.Static;
+
+ /**
+ * Strategy type, always "static" for static chunking
+ */
+ type: 'static';
+ }
+
+ export namespace VectorStoreChunkingStrategyStatic {
+ /**
+ * Configuration parameters for the static chunking strategy
+ */
+ export interface Static {
+ /**
+ * Number of tokens to overlap between adjacent chunks
+ */
+ chunk_overlap_tokens: number;
+
+ /**
+ * Maximum number of tokens per chunk, must be between 100 and 4096
+ */
+ max_chunk_size_tokens: number;
+ }
+ }
+}
+
+export interface FileBatchListFilesParams extends OpenAICursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list.
+ */
+ before?: string;
+
+ /**
+ * Filter by file status. One of in_progress, completed, failed, cancelled.
+ */
+ filter?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: string;
+}
+
+export declare namespace FileBatches {
+ export {
+ type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse,
+ type VectorStoreFileBatches as VectorStoreFileBatches,
+ type FileBatchCreateParams as FileBatchCreateParams,
+ type FileBatchListFilesParams as FileBatchListFilesParams,
+ };
+}
+
+export { VectorStoreFilesOpenAICursorPage };
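
A usage sketch for the new resource, assuming a running stack and previously uploaded file IDs; the `for await` loop leans on the SDK's cursor pagination via the re-exported `VectorStoreFilesOpenAICursorPage`:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function indexBatch(vectorStoreId: string, fileIds: Array<string>) {
  // chunking_strategy is omitted, so the server defaults to 'auto' per FileBatchCreateParams.
  let batch = await client.vectorStores.fileBatches.create(vectorStoreId, { file_ids: fileIds });

  // Naive polling until the batch leaves 'in_progress' (statuses per VectorStoreFileBatches above).
  while (batch.status === 'in_progress') {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    batch = await client.vectorStores.fileBatches.retrieve(vectorStoreId, batch.id);
  }

  // Page through the files processed in this batch.
  for await (const file of client.vectorStores.fileBatches.listFiles(vectorStoreId, batch.id)) {
    console.log(file.id);
  }
}
```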
diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts
index bc950cc..9af2869 100644
--- a/src/resources/vector-stores/files.ts
+++ b/src/resources/vector-stores/files.ts
@@ -14,7 +14,7 @@ export class Files extends APIResource {
body: FileCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<VectorStoreFile> {
- return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files`, { body, ...options });
+ return this._client.post(`/v1/vector_stores/${vectorStoreId}/files`, { body, ...options });
}
/**
@@ -25,7 +25,7 @@ export class Files extends APIResource {
fileId: string,
options?: Core.RequestOptions,
): Core.APIPromise<VectorStoreFile> {
- return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options);
+ return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options);
}
/**
@@ -37,10 +37,7 @@ export class Files extends APIResource {
body: FileUpdateParams,
options?: Core.RequestOptions,
): Core.APIPromise<VectorStoreFile> {
- return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, {
- body,
- ...options,
- });
+ return this._client.post(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, { body, ...options });
}
/**
@@ -64,7 +61,7 @@ export class Files extends APIResource {
return this.list(vectorStoreId, {}, query);
}
return this._client.getAPIList(
- `/v1/openai/v1/vector_stores/${vectorStoreId}/files`,
+ `/v1/vector_stores/${vectorStoreId}/files`,
VectorStoreFilesOpenAICursorPage,
{ query, ...options },
);
@@ -78,7 +75,7 @@ export class Files extends APIResource {
fileId: string,
options?: Core.RequestOptions,
): Core.APIPromise<FileDeleteResponse> {
- return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options);
+ return this._client.delete(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options);
}
/**
@@ -89,7 +86,7 @@ export class Files extends APIResource {
fileId: string,
options?: Core.RequestOptions,
): Core.APIPromise<FileContentResponse> {
- return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options);
+ return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options);
}
}
diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts
index 4b35bbb..0f53c8f 100644
--- a/src/resources/vector-stores/index.ts
+++ b/src/resources/vector-stores/index.ts
@@ -1,5 +1,12 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+export {
+ FileBatches,
+ type ListVectorStoreFilesInBatchResponse,
+ type VectorStoreFileBatches,
+ type FileBatchCreateParams,
+ type FileBatchListFilesParams,
+} from './file-batches';
export {
VectorStoreFilesOpenAICursorPage,
Files,
diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts
index e8994e2..85db692 100644
--- a/src/resources/vector-stores/vector-stores.ts
+++ b/src/resources/vector-stores/vector-stores.ts
@@ -3,6 +3,14 @@
import { APIResource } from '../../resource';
import { isRequestOptions } from '../../core';
import * as Core from '../../core';
+import * as FileBatchesAPI from './file-batches';
+import {
+ FileBatchCreateParams,
+ FileBatchListFilesParams,
+ FileBatches,
+ ListVectorStoreFilesInBatchResponse,
+ VectorStoreFileBatches,
+} from './file-batches';
import * as FilesAPI from './files';
import {
FileContentResponse,
@@ -18,19 +26,21 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../../pagination'
export class VectorStores extends APIResource {
files: FilesAPI.Files = new FilesAPI.Files(this._client);
+ fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client);
/**
- * Creates a vector store.
+ * Creates a vector store. Generate an OpenAI-compatible vector store with the
+ * given parameters.
*/
create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise<VectorStore> {
- return this._client.post('/v1/openai/v1/vector_stores', { body, ...options });
+ return this._client.post('/v1/vector_stores', { body, ...options });
}
/**
* Retrieves a vector store.
*/
retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise<VectorStore> {
- return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options);
+ return this._client.get(`/v1/vector_stores/${vectorStoreId}`, options);
}
/**
@@ -41,7 +51,7 @@ export class VectorStores extends APIResource {
body: VectorStoreUpdateParams,
options?: Core.RequestOptions,
): Core.APIPromise<VectorStore> {
- return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}`, { body, ...options });
+ return this._client.post(`/v1/vector_stores/${vectorStoreId}`, { body, ...options });
}
/**
@@ -59,17 +69,14 @@ export class VectorStores extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/vector_stores', VectorStoresOpenAICursorPage, {
- query,
- ...options,
- });
+ return this._client.getAPIList('/v1/vector_stores', VectorStoresOpenAICursorPage, { query, ...options });
}
/**
* Delete a vector store.
*/
delete(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise<VectorStoreDeleteResponse> {
- return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options);
+ return this._client.delete(`/v1/vector_stores/${vectorStoreId}`, options);
}
/**
@@ -81,7 +88,7 @@ export class VectorStores extends APIResource {
body: VectorStoreSearchParams,
options?: Core.RequestOptions,
): Core.APIPromise<VectorStoreSearchResponse> {
- return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/search`, { body, ...options });
+ return this._client.post(`/v1/vector_stores/${vectorStoreId}/search`, { body, ...options });
}
}
@@ -310,46 +317,29 @@ export namespace VectorStoreSearchResponse {
export interface VectorStoreCreateParams {
/**
- * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- * strategy.
+ * (Optional) Strategy for splitting files into chunks
*/
chunking_strategy?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
- * The dimension of the embedding vectors (default: 384).
- */
- embedding_dimension?: number;
-
- /**
- * The embedding model to use for this vector store.
- */
- embedding_model?: string;
-
- /**
- * The expiration policy for a vector store.
+ * (Optional) Expiration policy for the vector store
*/
expires_after?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
- * A list of File IDs that the vector store should use. Useful for tools like
- * `file_search` that can access files.
+ * List of file IDs to include in the vector store
*/
file_ids?: Array<string>;
/**
- * Set of 16 key-value pairs that can be attached to an object.
+ * Set of key-value pairs that can be attached to the vector store
*/
metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
/**
- * A name for the vector store.
+ * (Optional) A name for the vector store
*/
name?: string;
-
- /**
- * The ID of the provider to use for this vector store.
- */
- provider_id?: string;
}
export interface VectorStoreUpdateParams {
@@ -435,6 +425,7 @@ export namespace VectorStoreSearchParams {
VectorStores.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage;
VectorStores.Files = Files;
VectorStores.VectorStoreFilesOpenAICursorPage = VectorStoreFilesOpenAICursorPage;
+VectorStores.FileBatches = FileBatches;
export declare namespace VectorStores {
export {
@@ -459,4 +450,12 @@ export declare namespace VectorStores {
type FileUpdateParams as FileUpdateParams,
type FileListParams as FileListParams,
};
+
+ export {
+ FileBatches as FileBatches,
+ type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse,
+ type VectorStoreFileBatches as VectorStoreFileBatches,
+ type FileBatchCreateParams as FileBatchCreateParams,
+ type FileBatchListFilesParams as FileBatchListFilesParams,
+ };
}
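
Net effect of this file, as a sketch: every call now targets `/v1/vector_stores` instead of `/v1/openai/v1/vector_stores`, batches hang off `client.vectorStores.fileBatches`, and `embedding_model` / `embedding_dimension` / `provider_id` are no longer create-time params. The store name, file ID, and metadata below are illustrative:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function createStore() {
  // POSTs to /v1/vector_stores; embedding configuration is no longer part of VectorStoreCreateParams.
  const store = await client.vectorStores.create({
    name: 'my-docs',
    file_ids: ['file_123'], // illustrative file ID
    metadata: { project: 'demo' },
  });
  return store;
}
```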
diff --git a/src/version.ts b/src/version.ts
index 834272b..3c51123 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '0.2.23'; // x-release-please-version
+export const VERSION = '0.3.0-alpha.1'; // x-release-please-version
diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/alpha/agents/agents.test.ts
similarity index 79%
rename from tests/api-resources/agents/agents.test.ts
rename to tests/api-resources/alpha/agents/agents.test.ts
index 2f22dff..f06a1d3 100644
--- a/tests/api-resources/agents/agents.test.ts
+++ b/tests/api-resources/alpha/agents/agents.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource agents', () => {
test('create: only required params', async () => {
- const responsePromise = client.agents.create({
+ const responsePromise = client.alpha.agents.create({
agent_config: { instructions: 'instructions', model: 'model' },
});
const rawResponse = await responsePromise.asResponse();
@@ -20,7 +20,7 @@ describe('resource agents', () => {
});
test('create: required and optional params', async () => {
- const response = await client.agents.create({
+ const response = await client.alpha.agents.create({
agent_config: {
instructions: 'instructions',
model: 'model',
@@ -28,16 +28,10 @@ describe('resource agents', () => {
{
name: 'name',
description: 'description',
+ input_schema: { foo: true },
metadata: { foo: true },
- parameters: [
- {
- description: 'description',
- name: 'name',
- parameter_type: 'parameter_type',
- required: true,
- default: true,
- },
- ],
+ output_schema: { foo: true },
+ toolgroup_id: 'toolgroup_id',
},
],
enable_session_persistence: true,
@@ -61,7 +55,7 @@ describe('resource agents', () => {
});
test('retrieve', async () => {
- const responsePromise = client.agents.retrieve('agent_id');
+ const responsePromise = client.alpha.agents.retrieve('agent_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -73,13 +67,13 @@ describe('resource agents', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(client.agents.retrieve('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
- LlamaStackClient.NotFoundError,
- );
+ await expect(
+ client.alpha.agents.retrieve('agent_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('list', async () => {
- const responsePromise = client.agents.list();
+ const responsePromise = client.alpha.agents.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -91,7 +85,7 @@ describe('resource agents', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(client.agents.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.alpha.agents.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
LlamaStackClient.NotFoundError,
);
});
@@ -99,12 +93,12 @@ describe('resource agents', () => {
test('list: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.list({ limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }),
+ client.alpha.agents.list({ limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('delete', async () => {
- const responsePromise = client.agents.delete('agent_id');
+ const responsePromise = client.alpha.agents.delete('agent_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -116,8 +110,8 @@ describe('resource agents', () => {
test('delete: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(client.agents.delete('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
- LlamaStackClient.NotFoundError,
- );
+ await expect(
+ client.alpha.agents.delete('agent_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
});
});
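
For callers, the rewritten assertions boil down to inserting the `alpha` namespace. A minimal sketch with illustrative instructions and model ID:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function createAgent() {
  // Before this release: client.agents.create({ agent_config: { ... } })
  return client.alpha.agents.create({
    agent_config: { instructions: 'You are a helpful assistant.', model: 'llama3.2:3b' }, // illustrative model
  });
}
```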
diff --git a/tests/api-resources/agents/session.test.ts b/tests/api-resources/alpha/agents/session.test.ts
similarity index 81%
rename from tests/api-resources/agents/session.test.ts
rename to tests/api-resources/alpha/agents/session.test.ts
index efcf0e7..6a21a85 100644
--- a/tests/api-resources/agents/session.test.ts
+++ b/tests/api-resources/alpha/agents/session.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource session', () => {
test('create: only required params', async () => {
- const responsePromise = client.agents.session.create('agent_id', { session_name: 'session_name' });
+ const responsePromise = client.alpha.agents.session.create('agent_id', { session_name: 'session_name' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -18,11 +18,11 @@ describe('resource session', () => {
});
test('create: required and optional params', async () => {
- const response = await client.agents.session.create('agent_id', { session_name: 'session_name' });
+ const response = await client.alpha.agents.session.create('agent_id', { session_name: 'session_name' });
});
test('retrieve', async () => {
- const responsePromise = client.agents.session.retrieve('agent_id', 'session_id');
+ const responsePromise = client.alpha.agents.session.retrieve('agent_id', 'session_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -35,14 +35,14 @@ describe('resource session', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.session.retrieve('agent_id', 'session_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.agents.session.retrieve('agent_id', 'session_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('retrieve: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.session.retrieve(
+ client.alpha.agents.session.retrieve(
'agent_id',
'session_id',
{ turn_ids: ['string'] },
@@ -52,7 +52,7 @@ describe('resource session', () => {
});
test('list', async () => {
- const responsePromise = client.agents.session.list('agent_id');
+ const responsePromise = client.alpha.agents.session.list('agent_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -65,14 +65,14 @@ describe('resource session', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.session.list('agent_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.agents.session.list('agent_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('list: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.session.list(
+ client.alpha.agents.session.list(
'agent_id',
{ limit: 0, start_index: 0 },
{ path: '/_stainless_unknown_path' },
@@ -81,7 +81,7 @@ describe('resource session', () => {
});
test('delete', async () => {
- const responsePromise = client.agents.session.delete('agent_id', 'session_id');
+ const responsePromise = client.alpha.agents.session.delete('agent_id', 'session_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -94,7 +94,7 @@ describe('resource session', () => {
test('delete: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.session.delete('agent_id', 'session_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.agents.session.delete('agent_id', 'session_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
});
diff --git a/tests/api-resources/agents/steps.test.ts b/tests/api-resources/alpha/agents/steps.test.ts
similarity index 82%
rename from tests/api-resources/agents/steps.test.ts
rename to tests/api-resources/alpha/agents/steps.test.ts
index 0696783..ef3a136 100644
--- a/tests/api-resources/agents/steps.test.ts
+++ b/tests/api-resources/alpha/agents/steps.test.ts
@@ -7,7 +7,12 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource steps', () => {
test('retrieve', async () => {
- const responsePromise = client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id');
+ const responsePromise = client.alpha.agents.steps.retrieve(
+ 'agent_id',
+ 'session_id',
+ 'turn_id',
+ 'step_id',
+ );
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -20,7 +25,7 @@ describe('resource steps', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id', {
+ client.alpha.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id', {
path: '/_stainless_unknown_path',
}),
).rejects.toThrow(LlamaStackClient.NotFoundError);
diff --git a/tests/api-resources/agents/turn.test.ts b/tests/api-resources/alpha/agents/turn.test.ts
similarity index 81%
rename from tests/api-resources/agents/turn.test.ts
rename to tests/api-resources/alpha/agents/turn.test.ts
index dd4e3de..fc36021 100644
--- a/tests/api-resources/agents/turn.test.ts
+++ b/tests/api-resources/alpha/agents/turn.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource turn', () => {
test('create: only required params', async () => {
- const responsePromise = client.agents.turn.create('agent_id', 'session_id', {
+ const responsePromise = client.alpha.agents.turn.create('agent_id', 'session_id', {
messages: [{ content: 'string', role: 'user' }],
});
const rawResponse = await responsePromise.asResponse();
@@ -20,7 +20,7 @@ describe('resource turn', () => {
});
test('create: required and optional params', async () => {
- const response = await client.agents.turn.create('agent_id', 'session_id', {
+ const response = await client.alpha.agents.turn.create('agent_id', 'session_id', {
messages: [{ content: 'string', role: 'user', context: 'string' }],
documents: [{ content: 'string', mime_type: 'mime_type' }],
stream: false,
@@ -30,7 +30,7 @@ describe('resource turn', () => {
});
test('retrieve', async () => {
- const responsePromise = client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id');
+ const responsePromise = client.alpha.agents.turn.retrieve('agent_id', 'session_id', 'turn_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -43,12 +43,14 @@ describe('resource turn', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.agents.turn.retrieve('agent_id', 'session_id', 'turn_id', {
+ path: '/_stainless_unknown_path',
+ }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('resume: only required params', async () => {
- const responsePromise = client.agents.turn.resume('agent_id', 'session_id', 'turn_id', {
+ const responsePromise = client.alpha.agents.turn.resume('agent_id', 'session_id', 'turn_id', {
tool_responses: [{ call_id: 'call_id', content: 'string', tool_name: 'brave_search' }],
});
const rawResponse = await responsePromise.asResponse();
@@ -61,7 +63,7 @@ describe('resource turn', () => {
});
test('resume: required and optional params', async () => {
- const response = await client.agents.turn.resume('agent_id', 'session_id', 'turn_id', {
+ const response = await client.alpha.agents.turn.resume('agent_id', 'session_id', 'turn_id', {
tool_responses: [
{ call_id: 'call_id', content: 'string', tool_name: 'brave_search', metadata: { foo: true } },
],
diff --git a/tests/api-resources/eval/eval.test.ts b/tests/api-resources/alpha/eval/eval.test.ts
similarity index 92%
rename from tests/api-resources/eval/eval.test.ts
rename to tests/api-resources/alpha/eval/eval.test.ts
index 9f3e461..af16560 100644
--- a/tests/api-resources/eval/eval.test.ts
+++ b/tests/api-resources/alpha/eval/eval.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource eval', () => {
test('evaluateRows: only required params', async () => {
- const responsePromise = client.eval.evaluateRows('benchmark_id', {
+ const responsePromise = client.alpha.eval.evaluateRows('benchmark_id', {
benchmark_config: {
eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
scoring_params: {
@@ -32,7 +32,7 @@ describe('resource eval', () => {
});
test('evaluateRows: required and optional params', async () => {
- const response = await client.eval.evaluateRows('benchmark_id', {
+ const response = await client.alpha.eval.evaluateRows('benchmark_id', {
benchmark_config: {
eval_candidate: {
model: 'model',
@@ -62,7 +62,7 @@ describe('resource eval', () => {
});
test('evaluateRowsAlpha: only required params', async () => {
- const responsePromise = client.eval.evaluateRowsAlpha('benchmark_id', {
+ const responsePromise = client.alpha.eval.evaluateRowsAlpha('benchmark_id', {
benchmark_config: {
eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
scoring_params: {
@@ -87,7 +87,7 @@ describe('resource eval', () => {
});
test('evaluateRowsAlpha: required and optional params', async () => {
- const response = await client.eval.evaluateRowsAlpha('benchmark_id', {
+ const response = await client.alpha.eval.evaluateRowsAlpha('benchmark_id', {
benchmark_config: {
eval_candidate: {
model: 'model',
@@ -117,7 +117,7 @@ describe('resource eval', () => {
});
test('runEval: only required params', async () => {
- const responsePromise = client.eval.runEval('benchmark_id', {
+ const responsePromise = client.alpha.eval.runEval('benchmark_id', {
benchmark_config: {
eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
scoring_params: {
@@ -140,7 +140,7 @@ describe('resource eval', () => {
});
test('runEval: required and optional params', async () => {
- const response = await client.eval.runEval('benchmark_id', {
+ const response = await client.alpha.eval.runEval('benchmark_id', {
benchmark_config: {
eval_candidate: {
model: 'model',
@@ -168,7 +168,7 @@ describe('resource eval', () => {
});
test('runEvalAlpha: only required params', async () => {
- const responsePromise = client.eval.runEvalAlpha('benchmark_id', {
+ const responsePromise = client.alpha.eval.runEvalAlpha('benchmark_id', {
benchmark_config: {
eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
scoring_params: {
@@ -191,7 +191,7 @@ describe('resource eval', () => {
});
test('runEvalAlpha: required and optional params', async () => {
- const response = await client.eval.runEvalAlpha('benchmark_id', {
+ const response = await client.alpha.eval.runEvalAlpha('benchmark_id', {
benchmark_config: {
eval_candidate: {
model: 'model',
diff --git a/tests/api-resources/eval/jobs.test.ts b/tests/api-resources/alpha/eval/jobs.test.ts
similarity index 81%
rename from tests/api-resources/eval/jobs.test.ts
rename to tests/api-resources/alpha/eval/jobs.test.ts
index cad4ebd..c18a9f2 100644
--- a/tests/api-resources/eval/jobs.test.ts
+++ b/tests/api-resources/alpha/eval/jobs.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource jobs', () => {
test('retrieve', async () => {
- const responsePromise = client.eval.jobs.retrieve('benchmark_id', 'job_id');
+ const responsePromise = client.alpha.eval.jobs.retrieve('benchmark_id', 'job_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -20,12 +20,12 @@ describe('resource jobs', () => {
test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.eval.jobs.retrieve('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.eval.jobs.retrieve('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('cancel', async () => {
- const responsePromise = client.eval.jobs.cancel('benchmark_id', 'job_id');
+ const responsePromise = client.alpha.eval.jobs.cancel('benchmark_id', 'job_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -38,12 +38,12 @@ describe('resource jobs', () => {
test('cancel: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.eval.jobs.cancel('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.eval.jobs.cancel('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
test('status', async () => {
- const responsePromise = client.eval.jobs.status('benchmark_id', 'job_id');
+ const responsePromise = client.alpha.eval.jobs.status('benchmark_id', 'job_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -56,7 +56,7 @@ describe('resource jobs', () => {
test('status: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.eval.jobs.status('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
+ client.alpha.eval.jobs.status('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
});
diff --git a/tests/api-resources/alpha/inference.test.ts b/tests/api-resources/alpha/inference.test.ts
new file mode 100644
index 0000000..0d353cc
--- /dev/null
+++ b/tests/api-resources/alpha/inference.test.ts
@@ -0,0 +1,32 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import LlamaStackClient from 'llama-stack-client';
+import { Response } from 'node-fetch';
+
+const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
+
+describe('resource inference', () => {
+ test('rerank: only required params', async () => {
+ const responsePromise = client.alpha.inference.rerank({
+ items: ['string'],
+ model: 'model',
+ query: 'string',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('rerank: required and optional params', async () => {
+ const response = await client.alpha.inference.rerank({
+ items: ['string'],
+ model: 'model',
+ query: 'string',
+ max_num_results: 0,
+ });
+ });
+});
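
Outside the mock-server tests, the relocated rerank call looks like this sketch (model ID and documents are illustrative):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function rerank() {
  // rerank now lives under client.alpha.inference rather than client.inference.
  const results = await client.alpha.inference.rerank({
    query: 'battery life tips',
    items: ['Charge to 80%.', 'Use dark mode.', 'Close background apps.'],
    model: 'example-reranker', // illustrative model ID
    max_num_results: 2,
  });
  return results;
}
```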
diff --git a/tests/api-resources/post-training/job.test.ts b/tests/api-resources/alpha/post-training/job.test.ts
similarity index 77%
rename from tests/api-resources/post-training/job.test.ts
rename to tests/api-resources/alpha/post-training/job.test.ts
index 0cb1ebb..3f79918 100644
--- a/tests/api-resources/post-training/job.test.ts
+++ b/tests/api-resources/alpha/post-training/job.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource job', () => {
test('list', async () => {
- const responsePromise = client.postTraining.job.list();
+ const responsePromise = client.alpha.postTraining.job.list();
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -19,13 +19,13 @@ describe('resource job', () => {
test('list: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(client.postTraining.job.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ await expect(client.alpha.postTraining.job.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
LlamaStackClient.NotFoundError,
);
});
test('artifacts: only required params', async () => {
- const responsePromise = client.postTraining.job.artifacts({ job_uuid: 'job_uuid' });
+ const responsePromise = client.alpha.postTraining.job.artifacts({ job_uuid: 'job_uuid' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -36,11 +36,11 @@ describe('resource job', () => {
});
test('artifacts: required and optional params', async () => {
- const response = await client.postTraining.job.artifacts({ job_uuid: 'job_uuid' });
+ const response = await client.alpha.postTraining.job.artifacts({ job_uuid: 'job_uuid' });
});
test('cancel: only required params', async () => {
- const responsePromise = client.postTraining.job.cancel({ job_uuid: 'job_uuid' });
+ const responsePromise = client.alpha.postTraining.job.cancel({ job_uuid: 'job_uuid' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -51,11 +51,11 @@ describe('resource job', () => {
});
test('cancel: required and optional params', async () => {
- const response = await client.postTraining.job.cancel({ job_uuid: 'job_uuid' });
+ const response = await client.alpha.postTraining.job.cancel({ job_uuid: 'job_uuid' });
});
test('status: only required params', async () => {
- const responsePromise = client.postTraining.job.status({ job_uuid: 'job_uuid' });
+ const responsePromise = client.alpha.postTraining.job.status({ job_uuid: 'job_uuid' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -66,6 +66,6 @@ describe('resource job', () => {
});
test('status: required and optional params', async () => {
- const response = await client.postTraining.job.status({ job_uuid: 'job_uuid' });
+ const response = await client.alpha.postTraining.job.status({ job_uuid: 'job_uuid' });
});
});
diff --git a/tests/api-resources/post-training/post-training.test.ts b/tests/api-resources/alpha/post-training/post-training.test.ts
similarity index 93%
rename from tests/api-resources/post-training/post-training.test.ts
rename to tests/api-resources/alpha/post-training/post-training.test.ts
index ac7a53b..6069666 100644
--- a/tests/api-resources/post-training/post-training.test.ts
+++ b/tests/api-resources/alpha/post-training/post-training.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']
describe('resource postTraining', () => {
test('preferenceOptimize: only required params', async () => {
- const responsePromise = client.postTraining.preferenceOptimize({
+ const responsePromise = client.alpha.postTraining.preferenceOptimize({
algorithm_config: { beta: 0, loss_type: 'sigmoid' },
finetuned_model: 'finetuned_model',
hyperparam_search_config: { foo: true },
@@ -25,7 +25,7 @@ describe('resource postTraining', () => {
});
test('preferenceOptimize: required and optional params', async () => {
- const response = await client.postTraining.preferenceOptimize({
+ const response = await client.alpha.postTraining.preferenceOptimize({
algorithm_config: { beta: 0, loss_type: 'sigmoid' },
finetuned_model: 'finetuned_model',
hyperparam_search_config: { foo: true },
@@ -58,7 +58,7 @@ describe('resource postTraining', () => {
});
test('supervisedFineTune: only required params', async () => {
- const responsePromise = client.postTraining.supervisedFineTune({
+ const responsePromise = client.alpha.postTraining.supervisedFineTune({
hyperparam_search_config: { foo: true },
job_uuid: 'job_uuid',
logger_config: { foo: true },
@@ -74,7 +74,7 @@ describe('resource postTraining', () => {
});
test('supervisedFineTune: required and optional params', async () => {
- const response = await client.postTraining.supervisedFineTune({
+ const response = await client.alpha.postTraining.supervisedFineTune({
hyperparam_search_config: { foo: true },
job_uuid: 'job_uuid',
logger_config: { foo: true },
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
index 736d76a..9a0d2eb 100644
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -24,13 +24,11 @@ describe('resource completions', () => {
best_of: 0,
echo: true,
frequency_penalty: 0,
- guided_choice: ['string'],
logit_bias: { foo: 0 },
logprobs: true,
max_tokens: 0,
n: 0,
presence_penalty: 0,
- prompt_logprobs: 0,
seed: 0,
stop: 'string',
stream: false,
diff --git a/tests/api-resources/vector-dbs.test.ts b/tests/api-resources/conversations/conversations.test.ts
similarity index 61%
rename from tests/api-resources/vector-dbs.test.ts
rename to tests/api-resources/conversations/conversations.test.ts
index 4af5adf..e13a9e4 100644
--- a/tests/api-resources/vector-dbs.test.ts
+++ b/tests/api-resources/conversations/conversations.test.ts
@@ -5,9 +5,9 @@ import { Response } from 'node-fetch';
const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-describe('resource vectorDBs', () => {
- test('retrieve', async () => {
- const responsePromise = client.vectorDBs.retrieve('vector_db_id');
+describe('resource conversations', () => {
+ test('create', async () => {
+ const responsePromise = client.conversations.create({});
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -17,15 +17,8 @@ describe('resource vectorDBs', () => {
expect(dataAndResponse.response).toBe(rawResponse);
});
- test('retrieve: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.vectorDBs.retrieve('vector_db_id', { path: '/_stainless_unknown_path' }),
- ).rejects.toThrow(LlamaStackClient.NotFoundError);
- });
-
- test('list', async () => {
- const responsePromise = client.vectorDBs.list();
+ test('retrieve', async () => {
+ const responsePromise = client.conversations.retrieve('conversation_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -35,18 +28,15 @@ describe('resource vectorDBs', () => {
expect(dataAndResponse.response).toBe(rawResponse);
});
- test('list: request options instead of params are passed correctly', async () => {
+ test('retrieve: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(client.vectorDBs.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
- LlamaStackClient.NotFoundError,
- );
+ await expect(
+ client.conversations.retrieve('conversation_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
});
- test('register: only required params', async () => {
- const responsePromise = client.vectorDBs.register({
- embedding_model: 'embedding_model',
- vector_db_id: 'vector_db_id',
- });
+ test('update: only required params', async () => {
+ const responsePromise = client.conversations.update('conversation_id', { metadata: { foo: 'string' } });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -56,19 +46,12 @@ describe('resource vectorDBs', () => {
expect(dataAndResponse.response).toBe(rawResponse);
});
- test('register: required and optional params', async () => {
- const response = await client.vectorDBs.register({
- embedding_model: 'embedding_model',
- vector_db_id: 'vector_db_id',
- embedding_dimension: 0,
- provider_id: 'provider_id',
- provider_vector_db_id: 'provider_vector_db_id',
- vector_db_name: 'vector_db_name',
- });
+ test('update: required and optional params', async () => {
+ const response = await client.conversations.update('conversation_id', { metadata: { foo: 'string' } });
});
- test('unregister', async () => {
- const responsePromise = client.vectorDBs.unregister('vector_db_id');
+ test('delete', async () => {
+ const responsePromise = client.conversations.delete('conversation_id');
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -78,10 +61,10 @@ describe('resource vectorDBs', () => {
expect(dataAndResponse.response).toBe(rawResponse);
});
- test('unregister: request options instead of params are passed correctly', async () => {
+ test('delete: request options instead of params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.vectorDBs.unregister('vector_db_id', { path: '/_stainless_unknown_path' }),
+ client.conversations.delete('conversation_id', { path: '/_stainless_unknown_path' }),
).rejects.toThrow(LlamaStackClient.NotFoundError);
});
});
diff --git a/tests/api-resources/conversations/items.test.ts b/tests/api-resources/conversations/items.test.ts
new file mode 100644
index 0000000..91c2488
--- /dev/null
+++ b/tests/api-resources/conversations/items.test.ts
@@ -0,0 +1,70 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import LlamaStackClient from 'llama-stack-client';
+import { Response } from 'node-fetch';
+
+const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
+
+describe('resource items', () => {
+ test('create: only required params', async () => {
+ const responsePromise = client.conversations.items.create('conversation_id', {
+ items: [{ content: 'string', role: 'system', type: 'message' }],
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await client.conversations.items.create('conversation_id', {
+ items: [{ content: 'string', role: 'system', type: 'message', id: 'id', status: 'status' }],
+ });
+ });
+
+ test('list: only required params', async () => {
+ const responsePromise = client.conversations.items.list('conversation_id', {
+ after: 'string',
+ include: ['code_interpreter_call.outputs'],
+ limit: 0,
+ order: 'asc',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: required and optional params', async () => {
+ const response = await client.conversations.items.list('conversation_id', {
+ after: 'string',
+ include: ['code_interpreter_call.outputs'],
+ limit: 0,
+ order: 'asc',
+ });
+ });
+
+ test('get', async () => {
+ const responsePromise = client.conversations.items.get('conversation_id', 'item_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('get: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ client.conversations.items.get('conversation_id', 'item_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
+ });
+});
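
Chained together, the Conversations surface exercised above works roughly like this sketch (it assumes the created conversation exposes an `id`, and mirrors the test's treatment of all four list params as required):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function seedConversation() {
  const conversation = await client.conversations.create({});
  await client.conversations.items.create(conversation.id, {
    items: [{ content: 'You are terse.', role: 'system', type: 'message' }],
  });
  return client.conversations.items.list(conversation.id, {
    after: 'item_000', // illustrative cursor
    include: ['code_interpreter_call.outputs'],
    limit: 10,
    order: 'asc',
  });
}
```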
diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts
index 6482b2e..e3eec3d 100644
--- a/tests/api-resources/files.test.ts
+++ b/tests/api-resources/files.test.ts
@@ -24,6 +24,7 @@ describe('resource files', () => {
const response = await client.files.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
purpose: 'assistants',
+ expires_after: { anchor: 'created_at', seconds: 0 },
});
});
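
The new `expires_after` create param in a standalone sketch, using the same `toFile` helper the test imports; the one-day TTL is illustrative:

```ts
import LlamaStackClient, { toFile } from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://127.0.0.1:8321' });

async function uploadExpiringFile() {
  return client.files.create({
    file: await toFile(Buffer.from('# my file contents'), 'README.md'),
    purpose: 'assistants',
    // Anchor expiry to creation time, one day out (the anchor value matches the test above).
    expires_after: { anchor: 'created_at', seconds: 86400 },
  });
}
```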
diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/inference.test.ts
deleted file mode 100644
index e7d5df3..0000000
--- a/tests/api-resources/inference.test.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource inference', () => {
- test('batchChatCompletion: only required params', async () => {
- const responsePromise = client.inference.batchChatCompletion({
- messages_batch: [[{ content: 'string', role: 'user' }]],
- model_id: 'model_id',
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('batchChatCompletion: required and optional params', async () => {
- const response = await client.inference.batchChatCompletion({
- messages_batch: [[{ content: 'string', role: 'user', context: 'string' }]],
- model_id: 'model_id',
- logprobs: { top_k: 0 },
- response_format: { json_schema: { foo: true }, type: 'json_schema' },
- sampling_params: {
- strategy: { type: 'greedy' },
- max_tokens: 0,
- repetition_penalty: 0,
- stop: ['string'],
- },
- tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' },
- tools: [
- {
- tool_name: 'brave_search',
- description: 'description',
- parameters: {
- foo: { param_type: 'param_type', default: true, description: 'description', required: true },
- },
- },
- ],
- });
- });
-
- test('batchCompletion: only required params', async () => {
- const responsePromise = client.inference.batchCompletion({
- content_batch: ['string'],
- model_id: 'model_id',
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('batchCompletion: required and optional params', async () => {
- const response = await client.inference.batchCompletion({
- content_batch: ['string'],
- model_id: 'model_id',
- logprobs: { top_k: 0 },
- response_format: { json_schema: { foo: true }, type: 'json_schema' },
- sampling_params: {
- strategy: { type: 'greedy' },
- max_tokens: 0,
- repetition_penalty: 0,
- stop: ['string'],
- },
- });
- });
-
- test('chatCompletion: only required params', async () => {
- const responsePromise = client.inference.chatCompletion({
- messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('chatCompletion: required and optional params', async () => {
- const response = await client.inference.chatCompletion({
- messages: [{ content: 'string', role: 'user', context: 'string' }],
- model_id: 'model_id',
- logprobs: { top_k: 0 },
- response_format: { json_schema: { foo: true }, type: 'json_schema' },
- sampling_params: {
- strategy: { type: 'greedy' },
- max_tokens: 0,
- repetition_penalty: 0,
- stop: ['string'],
- },
- stream: false,
- tool_choice: 'auto',
- tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' },
- tool_prompt_format: 'json',
- tools: [
- {
- tool_name: 'brave_search',
- description: 'description',
- parameters: {
- foo: { param_type: 'param_type', default: true, description: 'description', required: true },
- },
- },
- ],
- });
- });
-
- test('completion: only required params', async () => {
- const responsePromise = client.inference.completion({ content: 'string', model_id: 'model_id' });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('completion: required and optional params', async () => {
- const response = await client.inference.completion({
- content: 'string',
- model_id: 'model_id',
- logprobs: { top_k: 0 },
- response_format: { json_schema: { foo: true }, type: 'json_schema' },
- sampling_params: {
- strategy: { type: 'greedy' },
- max_tokens: 0,
- repetition_penalty: 0,
- stop: ['string'],
- },
- stream: false,
- });
- });
-
- test('embeddings: only required params', async () => {
- const responsePromise = client.inference.embeddings({ contents: ['string'], model_id: 'model_id' });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('embeddings: required and optional params', async () => {
- const response = await client.inference.embeddings({
- contents: ['string'],
- model_id: 'model_id',
- output_dimension: 0,
- task_type: 'query',
- text_truncation: 'none',
- });
- });
-
- test('rerank: only required params', async () => {
- const responsePromise = client.inference.rerank({ items: ['string'], model: 'model', query: 'string' });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('rerank: required and optional params', async () => {
- const response = await client.inference.rerank({
- items: ['string'],
- model: 'model',
- query: 'string',
- max_num_results: 0,
- });
- });
-});
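// The deleted blocks above drop the batch-completion, chat-completion,
// completion, embeddings, and rerank tests along with their client.inference.*
// methods. A minimal sketch of calling a successor, assuming rerank was
// relocated under the alpha namespace (`client.alpha.inference.rerank` is an
// assumption of this sketch, not something the diff itself shows):
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

async function rerankExample() {
  // Same required params the deleted test exercised, just under the assumed new path.
  return client.alpha.inference.rerank({ items: ['string'], model: 'model', query: 'string' });
}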
diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts
index f1142d8..0a4bab6 100644
--- a/tests/api-resources/responses/responses.test.ts
+++ b/tests/api-resources/responses/responses.test.ts
@@ -21,6 +21,7 @@ describe('resource responses', () => {
const response = await client.responses.create({
input: 'string',
model: 'model',
+ conversation: 'conversation',
include: ['string'],
instructions: 'instructions',
max_infer_iters: 0,
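// The one functional change in this hunk is the new `conversation` parameter on
// responses.create. A minimal sketch of a call that threads a response into an
// existing conversation ('conversation' is the generated placeholder; presumably
// a real conversation ID goes here):
import LlamaStackClient from 'llama-stack-client';

async function createInConversation(client: LlamaStackClient) {
  return client.responses.create({
    input: 'string',
    model: 'model',
    conversation: 'conversation', // newly accepted param introduced by this diff
  });
}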
diff --git a/tests/api-resources/safety.test.ts b/tests/api-resources/safety.test.ts
index 4ca2ca6..6b43983 100644
--- a/tests/api-resources/safety.test.ts
+++ b/tests/api-resources/safety.test.ts
@@ -23,7 +23,7 @@ describe('resource safety', () => {
test('runShield: required and optional params', async () => {
const response = await client.safety.runShield({
- messages: [{ content: 'string', role: 'user', context: 'string' }],
+ messages: [{ content: 'string', role: 'user', name: 'name' }],
params: { foo: true },
shield_id: 'shield_id',
});
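// The runShield message shape changed: the `context` field on user messages was
// dropped in favor of `name`. A sketch of the updated call, assuming `name` is
// an optional per-message label (values are the test's placeholders):
import LlamaStackClient from 'llama-stack-client';

async function runShieldExample(client: LlamaStackClient) {
  return client.safety.runShield({
    messages: [{ content: 'string', role: 'user', name: 'name' }], // `name` replaces the removed `context`
    params: { foo: true },
    shield_id: 'shield_id',
  });
}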
diff --git a/tests/api-resources/telemetry.test.ts b/tests/api-resources/telemetry.test.ts
index e042d08..5653ebb 100644
--- a/tests/api-resources/telemetry.test.ts
+++ b/tests/api-resources/telemetry.test.ts
@@ -53,42 +53,6 @@ describe('resource telemetry', () => {
);
});
- test('logEvent: only required params', async () => {
- const responsePromise = client.telemetry.logEvent({
- event: {
- message: 'message',
- severity: 'verbose',
- span_id: 'span_id',
- timestamp: '2019-12-27T18:11:19.117Z',
- trace_id: 'trace_id',
- type: 'unstructured_log',
- },
- ttl_seconds: 0,
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('logEvent: required and optional params', async () => {
- const response = await client.telemetry.logEvent({
- event: {
- message: 'message',
- severity: 'verbose',
- span_id: 'span_id',
- timestamp: '2019-12-27T18:11:19.117Z',
- trace_id: 'trace_id',
- type: 'unstructured_log',
- attributes: { foo: 'string' },
- },
- ttl_seconds: 0,
- });
- });
-
// unsupported query params in java / kotlin
test.skip('queryMetrics: only required params', async () => {
const responsePromise = client.telemetry.queryMetrics('metric_name', {
diff --git a/tests/api-resources/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts
new file mode 100644
index 0000000..98e8964
--- /dev/null
+++ b/tests/api-resources/vector-stores/file-batches.test.ts
@@ -0,0 +1,101 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import LlamaStackClient from 'llama-stack-client';
+import { Response } from 'node-fetch';
+
+const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
+
+describe('resource fileBatches', () => {
+ test('create: only required params', async () => {
+ const responsePromise = client.vectorStores.fileBatches.create('vector_store_id', {
+ file_ids: ['string'],
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await client.vectorStores.fileBatches.create('vector_store_id', {
+ file_ids: ['string'],
+ attributes: { foo: true },
+ chunking_strategy: { type: 'auto' },
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = client.vectorStores.fileBatches.retrieve('vector_store_id', 'batch_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+ await expect(
+ client.vectorStores.fileBatches.retrieve('vector_store_id', 'batch_id', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
+ });
+
+ test('cancel', async () => {
+ const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('cancel: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+ await expect(
+ client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
+ });
+
+ test('listFiles', async () => {
+ const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('listFiles: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+ await expect(
+ client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
+ });
+
+ test('listFiles: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+ await expect(
+ client.vectorStores.fileBatches.listFiles(
+ 'vector_store_id',
+ 'batch_id',
+ { after: 'after', before: 'before', filter: 'filter', limit: 0, order: 'order' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(LlamaStackClient.NotFoundError);
+ });
+});
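// Read together, the new tests describe a create / retrieve / listFiles / cancel
// lifecycle for vector-store file batches. A compact end-to-end sketch built
// only from the signatures exercised above; `batch.id` assumes the create
// response carries the batch's ID, which this diff does not show:
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

async function fileBatchLifecycle() {
  // Kick off a batch that attaches files to a vector store.
  const batch = await client.vectorStores.fileBatches.create('vector_store_id', {
    file_ids: ['string'],
    attributes: { foo: true },
    chunking_strategy: { type: 'auto' },
  });

  // Check on the batch by ID (assumed field; see note above).
  await client.vectorStores.fileBatches.retrieve('vector_store_id', batch.id);

  // Page through the files the batch is processing.
  await client.vectorStores.fileBatches.listFiles('vector_store_id', batch.id, { after: 'after', limit: 0 });

  // Abort the batch if it is still running.
  await client.vectorStores.fileBatches.cancel('vector_store_id', batch.id);
}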