diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 59926dbf..448ca45f 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,7 +27,7 @@ jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 - name: Setup Pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b diff --git a/.github/workflows/firebase-hosting-merge.yml b/.github/workflows/firebase-hosting-merge.yml new file mode 100644 index 00000000..35567de1 --- /dev/null +++ b/.github/workflows/firebase-hosting-merge.yml @@ -0,0 +1,21 @@ +name: Deploy docs_v2 + +on: + push: + branches: + - main + +jobs: + build_and_deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 + - run: npm ci && npm run build + working-directory: docs_v2 + - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} + channelId: live + projectId: langchain-dart + entryPoint: docs_v2 diff --git a/.github/workflows/firebase-hosting-pull-request.yml b/.github/workflows/firebase-hosting-pull-request.yml new file mode 100644 index 00000000..0086d2e1 --- /dev/null +++ b/.github/workflows/firebase-hosting-pull-request.yml @@ -0,0 +1,26 @@ +name: Deploy docs_v2 on PR + +on: + pull_request: + paths: + - 'docs_v2/**' + +permissions: + checks: write + contents: read + pull-requests: write + +jobs: + build_and_preview: + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 + - run: npm ci && npm run build + working-directory: docs_v2 + - uses: FirebaseExtended/action-hosting-deploy@0cbcac4740c2bfb00d632f0b863b57713124eb5a + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_LANGCHAIN_DART }} + projectId: langchain-dart + entryPoint: docs_v2 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 114e4fab..1141bb2d 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -4,6 +4,9 @@ on: # pull_request_target is dangerous! 
Review external PRs code before approving to run the workflow # We need this to be able to access the secrets required by the workflow pull_request_target: + paths-ignore: + - 'docs/**' + - 'docs_v2/**' workflow_dispatch: # Cancel currently running workflow when a new one is triggered @@ -19,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 with: ref: "${{ github.event.pull_request.base.sha }}" # Required for pull_request_target fetch-depth: 0 @@ -38,7 +41,7 @@ jobs: run: flutter pub cache clean - name: Install Melos - uses: bluefireteam/melos-action@6085791af7036f6366c9a4b9d55105c0ef9c6388 + uses: bluefireteam/melos-action@c7dcb921b23cc520cace360b95d02b37bf09cdaa with: run-bootstrap: false diff --git a/.gitignore b/.gitignore index dd509d78..cf493593 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ .dart_tool/ /pubspec.lock .vscode/ +.aider* diff --git a/CHANGELOG.md b/CHANGELOG.md index a668f597..59691103 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,532 @@ # Change Log -Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +## 2024-09-25 + +### Changes + +--- + +Packages with breaking changes: + +- There are no breaking changes in this release. + +Packages with other changes: + +- [`langchain` - `v0.7.6`](#langchain---v076) +- [`langchain_core` - `v0.3.6`](#langchain_core---v036) +- [`langchain_community` - `v0.3.2`](#langchain_community---v032) +- [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212) +- [`langchain_google` - `v0.6.3+1`](#langchain_google---v0631) +- [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032) +- [`langchain_openai` - `v0.7.2`](#langchain_openai---v072) +- [`ollama_dart` - `v0.2.2`](#ollama_dart---v022) +- [`openai_dart` - `v0.4.2`](#openai_dart---v042) +- [`langchain_supabase` - `v0.1.1+3`](#langchain_supabase---v0113) +- [`langchain_pinecone` - `v0.1.0+9`](#langchain_pinecone---v0109) +- [`langchain_anthropic` - `v0.1.1+2`](#langchain_anthropic---v0112) +- [`langchain_chroma` - `v0.2.1+3`](#langchain_chroma---v0213) +- [`langchain_mistralai` - `v0.2.3+1`](#langchain_mistralai---v0231) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + +- `langchain_supabase` - `v0.1.1+3` +- `langchain_pinecone` - `v0.1.0+9` +- `langchain_anthropic` - `v0.1.1+2` +- `langchain_chroma` - `v0.2.1+3` +- `langchain_mistralai` - `v0.2.3+1` +- `vertex_ai` - `v0.1.0+2` + +--- + +#### `langchain` - `v0.7.6` + +- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +#### `langchain_core` - `v0.3.6` + +- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). 
([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +#### `langchain_community` - `v0.3.2` + +- **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) +- **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) + +#### `langchain_firebase` - `v0.2.1+2` + +- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +#### `langchain_google` - `v0.6.3+1` + +- **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) +- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +#### `langchain_ollama` - `v0.3.2` + +- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +#### `langchain_openai` - `v0.7.2` + +- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) +- **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) + +#### `ollama_dart` - `v0.2.2` + +- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +#### `openai_dart` - `v0.4.2` + +- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) +- **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) +- **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) + + +## 2024-08-22 + +### Changes + +--- + +Packages with breaking changes: + + - There are no breaking changes in this release. 
+ +Packages with other changes: + + - [`langchain` - `v0.7.5`](#langchain---v075) + - [`langchain_core` - `v0.3.5`](#langchain_core---v035) + - [`langchain_community` - `v0.3.1`](#langchain_community---v031) + - [`langchain_openai` - `v0.7.1`](#langchain_openai---v071) + - [`langchain_ollama` - `v0.3.1`](#langchain_ollama---v031) + - [`langchain_google` - `v0.6.2`](#langchain_google---v062) + - [`langchain_mistralai` - `v0.2.3`](#langchain_mistralai---v023) + - [`ollama_dart` - `v0.2.1`](#ollama_dart---v021) + - [`openai_dart` - `v0.4.1`](#openai_dart---v041) + - [`langchain_firebase` - `v0.2.1+1`](#langchain_firebase---v0211) + - [`langchain_supabase` - `v0.1.1+2`](#langchain_supabase---v0112) + - [`langchain_pinecone` - `v0.1.0+8`](#langchain_pinecone---v0108) + - [`langchain_anthropic` - `v0.1.1+1`](#langchain_anthropic---v0111) + - [`langchain_chroma` - `v0.2.1+2`](#langchain_chroma---v0212) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + + - `langchain_firebase` - `v0.2.1+1` + - `langchain_supabase` - `v0.1.1+2` + - `langchain_pinecone` - `v0.1.0+8` + - `langchain_anthropic` - `v0.1.1+1` + - `langchain_chroma` - `v0.2.1+2` + +--- + +#### `langchain` - `v0.7.5` + + - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) + +#### `langchain_core` - `v0.3.5` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +#### `langchain_community` - `v0.3.1` + + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +#### `langchain_openai` - `v0.7.1` + + - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). 
([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) + - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) + - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) + +#### `langchain_ollama` - `v0.3.1` + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `langchain_google` - `v0.6.2` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `langchain_mistralai` - `v0.2.3` + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +#### `openai_dart` - `v0.4.1` + + - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) + - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). 
([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + +#### `ollama_dart` - `v0.2.1` + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + + +## 2024-07-26 + +### Changes + +--- + +Packages with breaking changes: + + - [`langchain_community` - `v0.3.0`](#langchain_community---v030) + - [`langchain_ollama` - `v0.3.0`](#langchain_ollama---v030) + - [`langchain_openai` - `v0.7.0`](#langchain_openai---v070) + - [`ollama_dart` - `v0.2.0`](#ollama_dart---v020) + - [`openai_dart` - `v0.4.0`](#openai_dart---v040) + +Packages with other changes: + + - [`langchain` - `v0.7.4`](#langchain---v074) + - [`langchain_anthropic` - `v0.1.1`](#langchain_anthropic---v011) + - [`langchain_chroma` - `v0.2.1+1`](#langchain_chroma---v0211) + - [`langchain_core` - `v0.3.4`](#langchain_core---v034) + - [`langchain_firebase` - `v0.2.1`](#langchain_firebase---v021) + - [`langchain_google` - `v0.6.1`](#langchain_google---v061) + - [`langchain_mistralai` - `v0.2.2`](#langchain_mistralai---v022) + - [`langchain_pinecone` - `v0.1.0+7`](#langchain_pinecone---v0107) + - [`langchain_supabase` - `v0.1.1+1`](#langchain_supabase---v0111) + +--- + +#### `langchain` - `v0.7.4` + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) + +#### `langchain_core` - `v0.3.4` + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + +#### `langchain_community` - `v0.3.0` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). 
([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_ollama` - `v0.3.0` + + - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +#### `langchain_openai` - `v0.7.0` + + - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) + - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_anthropic` - `v0.1.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_firebase` - `v0.2.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_google` - `v0.6.1` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_mistralai` - `v0.2.2` + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_chroma` - `v0.2.1+1` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_pinecone` - `v0.1.0+7` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `langchain_supabase` - `v0.1.1+1` + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +#### `ollama_dart` - `v0.2.0` + + - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). ([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8)) + - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490)) + - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +#### `openai_dart` - `v0.4.0` + + - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) + - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) + - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). 
([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) + - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) + +## 2024-07-02 + +### Changes + +--- + +New packages: + +- [`langchain_anthropic` - `v0.1.0`](#langchain_anthropic---v010) +- [`tavily_dart` - `v0.1.0`](#tavily_dart---v010) + +Packages with breaking changes: + +- [`langchain_firebase` - `v0.2.0`](#langchain_firebase---v020) +- [`langchain_google` - `v0.6.0`](#langchain_google---v060) + +Packages with other changes: + +- [`langchain` - `v0.7.3`](#langchain---v073) +- [`langchain_core` - `v0.3.3`](#langchain_core---v033) +- [`langchain_community` - `v0.2.2`](#langchain_community---v022) +- [`langchain_chroma` - `v0.2.1`](#langchain_chroma---v021) +- [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) +- [`langchain_ollama` - `v0.2.2+1`](#langchain_ollama---v0221) +- [`langchain_openai` - `v0.6.3`](#langchain_openai---v063) +- [`langchain_pinecone` - `v0.1.0+6`](#langchain_pinecone---v0106) +- [`langchain_supabase` - `v0.1.1`](#langchain_supabase---v011) +- [`anthropic_sdk_dart` - `v0.1.0`](#anthropic_sdk_dart---v010) +- [`googleai_dart` - `v0.1.0+2`](#googleai_dart---v0102) +- [`mistralai_dart` - `v0.0.3+3`](#mistralai_dart---v0033) +- [`ollama_dart` - `v0.1.2`](#ollama_dart---v012) +- [`openai_dart` - `v0.3.3+1`](#openai_dart---v0331) + +--- + +#### `langchain` - `v0.7.3` + +> Note: Anthropic integration (`ChatAnthropic`) is available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) +- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +#### `langchain_core` - `v0.3.3` + +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +#### `langchain_community` - `v0.2.2` + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) + +#### `langchain_anthropic` - `v0.1.0` + +- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +#### `langchain_firebase` - `v0.2.0` + +> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. + +- **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). 
([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) +- **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) +- **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) +- **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) + +#### `langchain_google` - `v0.6.0` + +> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. + +- **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) +- **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `langchain_openai` - `v0.6.3` + +- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + +#### `langchain_ollama` - `v0.2.2+1` + +- **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +#### `langchain_chroma` - `v0.2.1` + +- Update a dependency to the latest release. + +#### `langchain_mistralai` - `v0.2.1` + +- Update a dependency to the latest release. + +#### `langchain_pinecone` - `v0.1.0+6` + +- Update a dependency to the latest release. + +#### `langchain_supabase` - `v0.1.1` + +- Update a dependency to the latest release. + +#### `anthropic_sdk_dart` - `v0.1.0` + +- **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). 
([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) +- **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) +- **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `ollama_dart` - `v0.1.2` + +- **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9)) +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `tavily_dart` - `v0.1.0` + +- **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) + +#### `googleai_dart` - `v0.1.0+2` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `mistralai_dart` - `v0.0.3+3` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +#### `openai_dart` - `v0.3.3+1` + +- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +## 2024-06-01 + +### Changes + +--- + +New packages: + + - [`anthropic_sdk_dart` - `v0.0.1`](#anthropic_sdk_dart---v001) + +Packages with other changes: + + - [`langchain` - `v0.7.2`](#langchain---v072) + - [`langchain_core` - `v0.3.2`](#langchain_core---v032) + - [`langchain_community` - `v0.2.1`](#langchain_community---v021) + - [`langchain_chroma` - `v0.2.0+5`](#langchain_chroma---v0205) + - [`langchain_firebase` - `v0.1.0+2`](#langchain_firebase---v0102) + - [`langchain_google` - `v0.5.1`](#langchain_google---v051) + - [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021) + - [`langchain_ollama` - `v0.2.2`](#langchain_ollama---v022) + - [`langchain_openai` - `v0.6.2`](#langchain_openai---v062) + - [`langchain_pinecone` - `v0.1.0+5`](#langchain_pinecone---v0105) + - [`langchain_supabase` - `v0.1.0+5`](#langchain_supabase---v0105) + - [`chromadb` - `v0.2.0+1`](#chromadb---v0201) + - [`googleai_dart` - `v0.1.0+1`](#googleai_dart---v0101) + - [`mistralai_dart` - `v0.0.3+2`](#mistralai_dart---v0032) + - [`ollama_dart` - `v0.1.1`](#ollama_dart---v011) + - [`openai_dart` - `v0.3.3`](#openai_dart---v033) + - [`vertex_ai` - `v0.1.0+1`](#vertex_ai---v0101) + +--- + +#### `langchain` - `v0.7.2` + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + +#### `langchain_core` - `v0.3.2` + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) + +#### `langchain_community` - `v0.2.1` + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + +#### `langchain_openai` - `v0.6.2` + + - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) + +#### `anthropic_sdk_dart` - `v0.0.1` + + - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). 
([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) + +#### `ollama_dart` - `v0.1.1` + + - **FEAT**: Support buffered stream responses ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd)) + +#### `openai_dart` - `v0.3.3` + + - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) + - **FIX**: Make vector store name optional ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + + +## 2024-05-20 + +### Changes + +--- + +Packages with breaking changes: + +- There are no breaking changes in this release. + +Packages with other changes: + +- [`langchain_firebase` - `v0.1.0+1`](#langchain_firebase---v0101) +- [`ollama_dart` - `v0.1.0+1`](#ollama_dart---v0101) +- [`openai_dart` - `v0.3.2+1`](#openai_dart---v0321) +- [`langchain_ollama` - `v0.2.1+1`](#langchain_ollama---v0211) +- [`langchain_openai` - `v0.6.1+1`](#langchain_openai---v0611) + +Packages with dependency updates only: + +> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project. + +- `langchain_ollama` - `v0.2.1+1` +- `langchain_openai` - `v0.6.1+1` + +--- + +#### `openai_dart` - `v0.3.2+1` + +- **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) +- **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) + +#### `ollama_dart` - `v0.1.0+1` + +- **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d)) + +#### `langchain_firebase` - `v0.1.0+1` + +- **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) ## 2024-05-14 @@ -2382,11 +2908,3 @@ Packages with changes: - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85)) - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). ([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7)) - -## 2023-07-02 - -### Changes - -#### `langchain` - `v0.0.1` - - - Initial public release. 
diff --git a/analysis_options.yaml b/analysis_options.yaml index d8f55c71..9a364363 100644 --- a/analysis_options.yaml +++ b/analysis_options.yaml @@ -7,6 +7,11 @@ analyzer: missing_return: error todo: ignore sdk_version_since: ignore # TODO remove when fixed https://github.com/dart-lang/sdk/issues/52327 + exclude: + - "**/generated_plugin_registrant.dart" + - "**/generated/**" + - "**/*.gen.dart" + - "**/*.g.dart" linter: rules: # https://dart-lang.github.io/linter/lints/{rule}.html @@ -30,7 +35,7 @@ linter: - avoid_null_checks_in_equality_operators - avoid_positional_boolean_parameters - avoid_print - # - avoid_redundant_argument_values # Sometimes is useful to be explicit + # - avoid_redundant_argument_values # I prefer to be explicit sometimes - avoid_relative_lib_imports - avoid_renaming_method_parameters - avoid_return_types_on_setters @@ -65,6 +70,7 @@ linter: # - diagnostic_describe_all_properties # Disabled because it's very verbose - directives_ordering - discarded_futures + # - document_ignores # Disabled because it's very verbose - empty_catches - empty_constructor_bodies - empty_statements @@ -76,6 +82,7 @@ linter: - implementation_imports - implicit_call_tearoffs - invalid_case_patterns + - invalid_runtime_check_with_js_interop_types - iterable_contains_unrelated_type - join_return_with_assignment - leading_newlines_in_multiline_strings @@ -85,6 +92,7 @@ linter: - library_private_types_in_public_api - list_remove_unrelated_type - matching_super_parameters + - missing_code_block_language_in_doc_comment - missing_whitespace_between_adjacent_strings - no_adjacent_strings_in_list - no_default_cases @@ -94,6 +102,7 @@ linter: - no_literal_bool_comparisons - no_logic_in_create_state - no_runtimeType_toString + - no_wildcard_variable_uses - non_constant_identifier_names - noop_primitive_operations - null_check_on_nullable_type_parameter @@ -116,7 +125,7 @@ linter: - prefer_final_fields - prefer_final_in_for_each - prefer_final_locals - # - prefer_final_parameters # adds too much verbosity + # - prefer_final_parameters # Very verbose - prefer_for_elements_to_map_fromIterable - prefer_foreach - prefer_function_declarations_over_variables @@ -152,6 +161,7 @@ linter: - type_init_formals - type_literal_in_constant_pattern - unawaited_futures + - unintended_html_in_doc_comment - unnecessary_await_in_return - unnecessary_brace_in_string_interps - unnecessary_breaks @@ -161,6 +171,7 @@ linter: - unnecessary_lambdas - unnecessary_late - unnecessary_library_directive + - unnecessary_library_name - unnecessary_new - unnecessary_null_aware_assignments - unnecessary_null_aware_operator_on_extension_on_nullable diff --git a/docs/CNAME b/docs/CNAME index 17960576..6217d1b3 100644 --- a/docs/CNAME +++ b/docs/CNAME @@ -1 +1 @@ -langchaindart.com \ No newline at end of file +langchaindart.dev diff --git a/docs/README.md b/docs/README.md index 8f9a3f2f..12785c34 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,7 +38,7 @@ LCEL is a declarative way to compose chains. 
LCEL was designed from day 1 to sup - [Overview](/expression_language/expression_language): LCEL and its benefits - [Interface](/expression_language/interface): The standard interface for LCEL objects -- [Cookbook](https://langchaindart.com/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks +- [Cookbook](https://langchaindart.dev/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks ## Modules diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 532c82b8..ee2c472a 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -1,3 +1,4 @@ +- [![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) - [Get started](README.md) - [Installation](/get_started/installation.md) - [Quickstart](/get_started/quickstart.md) @@ -13,7 +14,9 @@ - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) + - [Retry: Retrying runnables](/expression_language/primitives/retry.md) - [Streaming](/expression_language/streaming.md) + - [Fallbacks](/expression_language/fallbacks.md) - Cookbook - [Prompt + LLM](/expression_language/cookbook/prompt_llm_parser.md) - [Multiple chains](/expression_language/cookbook/multiple_chains.md) @@ -56,6 +59,7 @@ - [Tool calling](/modules/model_io/models/chat_models/how_to/tools.md) - [LLMChain](/modules/model_io/models/chat_models/how_to/llm_chain.md) - Integrations + - [Anthropic](/modules/model_io/models/chat_models/integrations/anthropic.md) - [OpenAI](/modules/model_io/models/chat_models/integrations/openai.md) - [Firebase Vertex AI](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) @@ -94,6 +98,7 @@ - [Vector stores](/modules/retrieval/vector_stores/vector_stores.md) - Integrations - [Memory](/modules/retrieval/vector_stores/integrations/memory.md) + - [ObjectBox](/modules/retrieval/vector_stores/integrations/objectbox.md) - [Chroma](/modules/retrieval/vector_stores/integrations/chroma.md) - [Pinecone](/modules/retrieval/vector_stores/integrations/pinecone.md) - [Supabase](/modules/retrieval/vector_stores/integrations/supabase.md) @@ -118,7 +123,7 @@ - [Memory](/modules/memory/memory.md) - [Agents](/modules/agents/agents.md) - [Agent types](/modules/agents/agent_types/agent_types.md) - - [OpenAI functions](/modules/agents/agent_types/openai_tools_agent.md) + - [Tools Agent](/modules/agents/agent_types/tools_agent.md) - [Tools](/modules/agents/tools/tools.md) - [Calculator](/modules/agents/tools/calculator.md) - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md) diff --git a/docs/expression_language/cookbook/prompt_llm_parser.md b/docs/expression_language/cookbook/prompt_llm_parser.md index bb9a1a28..e96bf6c1 100644 --- a/docs/expression_language/cookbook/prompt_llm_parser.md +++ b/docs/expression_language/cookbook/prompt_llm_parser.md @@ -33,7 +33,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835666, // system_fingerprint: fp_3b956da36b // }, @@ -74,7 +74,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835734, // 
system_fingerprint: fp_a450710239 // }, @@ -144,7 +144,7 @@ print(res); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/fallbacks.md b/docs/expression_language/fallbacks.md new file mode 100644 index 00000000..5fd0b8a7 --- /dev/null +++ b/docs/expression_language/fallbacks.md @@ -0,0 +1,135 @@ +# Fallbacks + +When working with language models, you may often encounter issues from the underlying APIs, e.g. rate limits or downtime. Therefore, as you move your LLM applications into production, it becomes more and more important to have contingencies for errors. That's why we've introduced the concept of fallbacks. + +Crucially, fallbacks can be applied not only on the LLM level but on the whole runnable level. This is important because often different models require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use e.g. a different prompt template. + +## Handling LLM API errors with fallbacks + +This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit a rate limit, or any number of things. This situation can be handled using fallbacks. + +Fallbacks can be created using the `withFallbacks()` method on the runnable you are working with, for example `final runnableWithFallbacks = mainRunnable.withFallbacks([fallback1, fallback2])`. This creates a `RunnableWithFallback` along with a list of fallbacks. When it is invoked, the `mainRunnable` is called first; if it fails, the fallbacks are invoked sequentially until one of them returns output. If the `mainRunnable` succeeds and returns output, the fallbacks won't be called. + +## Fallback for chat models + +```dart +// The fake model will throw an error during invoke and the fallback model will be called +final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), +); + +final latestModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), +); + +final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); + +final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + +final res = await modelWithFallbacks.invoke(prompt); +print(res); +/* +{ + "ChatResult": { + "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", + "output": { + "AIChatMessage": { + "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. This scattering process is known as Rayleigh scattering.", + "toolCalls": [] + } + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721542696, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "LanguageModelUsage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 36, + "responseBillableCharacters": null, + "totalTokens": 52 + } + }, + "streaming": false + } +} +*/ +``` + +Note: if the options provided when invoking the runnable with fallbacks are not compatible with some of the fallbacks, they will be ignored. If you want to use different options for different fallbacks, provide them as `defaultOptions` when instantiating the fallbacks or use `bind()`.
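+
+For example (a minimal sketch reusing the `ChatOpenAI` and `ChatOpenAIOptions` API shown above; the specific models, option values, and variable names are only illustrative assumptions), each fallback can carry its own configuration through `defaultOptions` instead of relying on the options passed at invocation:
+
+```dart
+// Hypothetical sketch: each fallback is configured via its own `defaultOptions`,
+// so it does not depend on the options passed when invoking the runnable.
+final mainModel = ChatOpenAI(
+  defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o', temperature: 0.2),
+);
+final fallbackModel = ChatOpenAI(
+  defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.7),
+);
+final modelWithFallbacks = mainModel.withFallbacks([fallbackModel]);
+final res = await modelWithFallbacks.invoke(
+  PromptValue.string('Explain why the sky is blue in 2 lines'),
+);
+print(res);
+```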
+ +## Fallbacks for RunnableSequences with batch + +```dart +final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), +); + +final latestModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + +final badChain = promptTemplate.pipe(fakeOpenAIModel); +final goodChain = promptTemplate.pipe(latestModel); + +final chainWithFallbacks = badChain.withFallbacks([goodChain]); + +final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], +); +print(res); +/* +[ + { + "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", + "output": { + "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 🐻🌲", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 31, + "responseBillableCharacters": null, + "totalTokens": 44 + }, + "streaming": false + }, + { + "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", + "output": { + "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 29, + "responseBillableCharacters": null, + "totalTokens": 42 + }, + "streaming": false + } +] +*/ +``` diff --git a/docs/expression_language/get_started.md b/docs/expression_language/get_started.md index 70c12b9a..9b51efe6 100644 --- a/docs/expression_language/get_started.md +++ b/docs/expression_language/get_started.md @@ -120,7 +120,7 @@ print(res2); // }, // finishReason: FinishReason.stop, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/docs/expression_language/interface.md b/docs/expression_language/interface.md index 9b7085d8..30fcf890 100644 --- a/docs/expression_language/interface.md +++ b/docs/expression_language/interface.md @@ -107,7 +107,7 @@ final res = await chain.batch( {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/docs/expression_language/primitives.md b/docs/expression_language/primitives.md index 89d618e4..aecaa93e 100644 --- a/docs/expression_language/primitives.md +++ b/docs/expression_language/primitives.md @@ -11,3 +11,4 @@ This section goes into greater depth on where and how some of these components a - [Function: Run custom logic](/expression_language/primitives/function.md) - [Binding: Configuring runnables](/expression_language/primitives/binding.md) - [Router: Routing inputs](/expression_language/primitives/router.md) +- [Retry: Retrying Runnable](/expression_language/primitives/retry.md) diff --git a/docs/expression_language/primitives/binding.md b/docs/expression_language/primitives/binding.md index a04a511f..2aeb9575 100644 --- a/docs/expression_language/primitives/binding.md +++ 
b/docs/expression_language/primitives/binding.md
@@ -57,7 +57,7 @@ final chain = Runnable.fromMap({
       chatModel.bind(ChatOpenAIOptions(model: 'gpt-4-turbo')) |
       outputParser,
   'q2': prompt2 |
-      chatModel.bind(ChatOpenAIOptions(model: 'gpt-3.5-turbo')) |
+      chatModel.bind(ChatOpenAIOptions(model: 'gpt-4o-mini')) |
       outputParser,
 });
diff --git a/docs/expression_language/primitives/function.md b/docs/expression_language/primitives/function.md
index e0b621fd..88bf731b 100644
--- a/docs/expression_language/primitives/function.md
+++ b/docs/expression_language/primitives/function.md
@@ -76,7 +76,7 @@ await chain.invoke('x raised to the third plus seven equals 12');
 // },
 //   finishReason: FinishReason.stop,
 //   metadata: {
-//     model: gpt-3.5-turbo-0125,
+//     model: gpt-4o-mini,
 //     created: 1714463309,
 //     system_fingerprint: fp_3b956da36b
 //   },
@@ -122,7 +122,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){});
 // },
 //   finishReason: FinishReason.unspecified,
 //   metadata: {
-//     model: gpt-3.5-turbo-0125,
+//     model: gpt-4o-mini,
 //     created: 1714463766,
 //     system_fingerprint: fp_3b956da36b
 //   },
@@ -141,7 +141,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){});
 // },
 //   finishReason: FinishReason.unspecified,
 //   metadata: {
-//     model: gpt-3.5-turbo-0125,
+//     model: gpt-4o-mini,
 //     created: 1714463766,
 //     system_fingerprint: fp_3b956da36b
 //   },
@@ -160,7 +160,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){});
 // },
 //   finishReason: FinishReason.unspecified,
 //   metadata: {
-//     model: gpt-3.5-turbo-0125,
+//     model: gpt-4o-mini,
 //     created: 1714463766,
 //     system_fingerprint: fp_3b956da36b
 //   },
diff --git a/docs/expression_language/primitives/mapper.md b/docs/expression_language/primitives/mapper.md
index 2fb57295..bc599cb6 100644
--- a/docs/expression_language/primitives/mapper.md
+++ b/docs/expression_language/primitives/mapper.md
@@ -54,9 +54,7 @@ In the following example, the model streams the output in chunks and the output
 final model = ChatOpenAI(
   apiKey: openAiApiKey,
   defaultOptions: ChatOpenAIOptions(
-    responseFormat: ChatOpenAIResponseFormat(
-      type: ChatOpenAIResponseFormatType.jsonObject,
-    ),
+    responseFormat: ChatOpenAIResponseFormat.jsonObject,
   ),
 );
 final parser = JsonOutputParser();
diff --git a/docs/expression_language/primitives/retry.md b/docs/expression_language/primitives/retry.md
new file mode 100644
index 00000000..ef6ae6c9
--- /dev/null
+++ b/docs/expression_language/primitives/retry.md
@@ -0,0 +1,94 @@
+# RunnableRetry: Retrying Runnables
+
+`RunnableRetry` wraps a `Runnable` and retries it if it fails. It can be created using `runnable.withRetry()`.
+
+By default, the runnable will be retried 3 times with an exponential backoff strategy.
+
+## Usage
+
+## Creating a RunnableRetry
+
+```dart
+final model = ChatOpenAI();
+final input = PromptValue.string('Explain why sky is blue in 2 lines');
+
+final modelWithRetry = model.withRetry();
+final res = await modelWithRetry.invoke(input);
+print(res);
+```
+
+## Retrying a chain
+
+`RunnableRetry` can be used to retry any `Runnable`, including a chain of `Runnable`s.
+ +Example + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); +final model = ChatOpenAI( + defaultOptions: ChatOpenAIOptions(model: 'gpt-4o'), +); +final chain = promptTemplate.pipe(model).withRetry(); + +final res = await chain.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], +); +print(res); +``` + +> In general, it's best to keep the scope of the retry as small as possible. + +## Configuring the retry + +```dart +// passing a fake model to cause Exception +final input = PromptValue.string('Explain why sky is blue in 2 lines'); +final model = ChatOpenAI( + defaultOptions: ChatOpenAIOptions(model: 'fake-model'), +); +final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: true, +); +final res = await modelWithRetry.invoke(input); +print(res); +// retried 3 times and returned Exception: +// OpenAIClientException({ +// "uri": "https://api.openai.com/v1/chat/completions", +// "method": "POST", +// "code": 404, +// "message": "Unsuccessful response", +// "body": { +// "error": { +// "message": "The model `fake-model` does not exist or you do not have access to it.", +// "type": "invalid_request_error", +// "param": null, +// "code": "model_not_found" +// } +// } +// }) +``` + +## Passing delay durations + +If you want to use custom delay durations for each retry attempt, you can pass a list of `Duration` objects to the `delayDurations` parameter. + +```dart +final input = PromptValue.string('Explain why sky is blue in 2 lines'); +final model = ChatOpenAI( + defaultOptions: ChatOpenAIOptions(model: 'fake-model'), +); +final modelWithRetry = model.withRetry( + maxRetries: 3, + delayDurations: [ + Duration(seconds: 1), + Duration(seconds: 2), + Duration(seconds: 3), + ], +); +final res = await modelWithRetry.invoke(input); +print(res); +``` diff --git a/docs/expression_language/primitives/router.md b/docs/expression_language/primitives/router.md index 15b6f8ad..da5a59c6 100644 --- a/docs/expression_language/primitives/router.md +++ b/docs/expression_language/primitives/router.md @@ -12,7 +12,7 @@ First, let’s create a chain that will identify incoming questions as being abo ```dart final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -131,7 +131,7 @@ Here is a question: {query} '''; -final embeddings = OllamaEmbeddings(model: 'llama3'); +final embeddings = OllamaEmbeddings(model: 'llama3.2'); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( promptTemplates.map((final pt) => Document(pageContent: pt)).toList(), @@ -146,7 +146,7 @@ final chain = Runnable.fromMap({'query': Runnable.passthrough()}) | return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ) | StringOutputParser(); diff --git a/docs/expression_language/streaming.md b/docs/expression_language/streaming.md index 8b4b720f..dd04c9c6 100644 --- a/docs/expression_language/streaming.md +++ b/docs/expression_language/streaming.md @@ -49,7 +49,7 @@ print(chunks.first); // }, // finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -71,7 +71,7 @@ print(result); // }, // 
finishReason: FinishReason.unspecified, // metadata: { -// model: gpt-3.5-turbo-0125, +// model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -124,9 +124,7 @@ Let’s see such a parser in action to understand what this means. final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/docs/index.html b/docs/index.html index eab7ac39..6d4f395b 100644 --- a/docs/index.html +++ b/docs/index.html @@ -2,16 +2,13 @@ - + @@ -41,7 +38,7 @@ - + diff --git a/docs/modules/agents/agent_types/agent_types.md b/docs/modules/agents/agent_types/agent_types.md index 229422ee..d6c79bd0 100644 --- a/docs/modules/agents/agent_types/agent_types.md +++ b/docs/modules/agents/agent_types/agent_types.md @@ -8,7 +8,7 @@ response to the user. Here are the agents available in LangChain. ### OpenAI Functions -Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been +Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been explicitly fine-tuned to detect when a function should to be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models. diff --git a/docs/modules/agents/agent_types/openai_tools_agent.md b/docs/modules/agents/agent_types/openai_tools_agent.md deleted file mode 100644 index db68921e..00000000 --- a/docs/modules/agents/agent_types/openai_tools_agent.md +++ /dev/null @@ -1,173 +0,0 @@ -# OpenAI tools - -Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been -fine-tuned to detect when a tool should to be called and respond with the -inputs that should be passed to the tool. In an API call, you can describe -tools and have the model intelligently choose to output a JSON object -containing arguments to call those tools. The goal of the OpenAI Function -APIs is to more reliably return valid and useful tool calls than a generic -text completion or chat API. - -The OpenAI Tools Agent is designed to work with these models. - -> **Note**: Must be used with an [OpenAI Tools](https://platform.openai.com/docs/guides/function-calling) model. - -```dart -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - model: 'gpt-4-turbo', - temperature: 0, - ), -); -final tool = CalculatorTool(); -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); -final executor = AgentExecutor(agent: agent); -final res = await executor.run('What is 40 raised to the 0.43 power? '); -print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' -``` - -You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. - -Let's see an example of how to do this. - -First let's create a class that will be the input for our tool. 
- -```dart -class SearchInput { - const SearchInput({ - required this.query, - required this.n, - }); - - final String query; - final int n; - - SearchInput.fromJson(final Map json) - : this( - query: json['query'] as String, - n: json['n'] as int, - ); -} -``` - -Now let's define the tool: - -```dart -final tool = Tool.fromFunction( - name: 'search', - description: 'Tool for searching the web.', - inputJsonSchema: const { - 'type': 'object', - 'properties': { - 'query': { - 'type': 'string', - 'description': 'The query to search for', - }, - 'n': { - 'type': 'number', - 'description': 'The number of results to return', - }, - }, - 'required': ['query'], - }, - func: callYourSearchFunction, - getInputFromJson: SearchInput.fromJson, -); -``` - -Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. - -The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. -```dart -String callYourSearchFunction(final SearchInput input) { - return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; -} -``` - -Now we can create the agent and run it. - -```dart -final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), -); - -final memory = ConversationBufferMemory(returnMessages: true); -final agent = OpenAIToolsAgent.fromLLMAndTools( - llm: llm, - tools: [tool], - memory: memory, -); - -final executor = AgentExecutor(agent: agent); - -final res1 = await executor.run( - 'Search for cats. Return only 3 results.', -); -print(res1); -// Here are 3 search results for "cats": -// 1. Result 1 -// 2. Result 2 -// 3. Result 3 -``` - -## Using LangChain Expression Language (LCEL) - -You can replicate the functionality of the OpenAI Functions Agent by using the LangChain Expression Language (LCEL) directly. - -```dart -final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - -final prompt = ChatPromptTemplate.fromTemplates(const [ - (ChatMessageType.system, 'You are a helpful assistant'), - (ChatMessageType.human, '{input}'), - (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), -]); - -final tool = CalculatorTool(); - -final model = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: ChatOpenAIOptions( - temperature: 0, - tools: [tool], - ), -); - -const outputParser = OpenAIToolsAgentOutputParser(); - -List buildScratchpad(final List intermediateSteps) { - return intermediateSteps - .map((final s) { - return s.action.messageLog + - [ - ChatMessage.tool( - toolCallId: s.action.id, - content: s.observation, - ), - ]; - }) - .expand((final m) => m) - .toList(growable: false); -} - -final agent = Agent.fromRunnable( - Runnable.mapInput( - (final AgentPlanInput planInput) => { - 'input': planInput.inputs['input'], - 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), - }, - ).pipe(prompt).pipe(model).pipe(outputParser), - tools: [tool], -); -final executor = AgentExecutor(agent: agent); - -final res = await executor.invoke({ - 'input': 'What is 40 raised to the 0.43 power?', -}); -print(res['output']); -// 40 raised to the power of 0.43 is approximately 4.88524. -``` - -In this way, you can create your own custom agents with full control over their behavior. 
diff --git a/docs/modules/agents/agent_types/tools_agent.md b/docs/modules/agents/agent_types/tools_agent.md new file mode 100644 index 00000000..45a14352 --- /dev/null +++ b/docs/modules/agents/agent_types/tools_agent.md @@ -0,0 +1,190 @@ +# Tools Agent + +An agent powered by the [tool calling API](/modules/model_io/models/chat_models/how_to/tools.md). + +This agent is designed to work with any chat model that supports tool calling. It can interpret the model's output and decide when to call specific tools based on that output. + +**Supported models:** +You can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the [tool calling docs](/modules/model_io/models/chat_models/how_to/tools.md) for a complete list. + +## Usage + +In the following example, we use `ChatOllama` with the `llama3.2` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression. + +```dart +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +//... + +final llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.2', + temperature: 0, + ), +); +final tool = CalculatorTool(); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final executor = AgentExecutor(agent: agent); +final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', +); +print(res); +// The result is: 4.885 +``` + +## Custom tools + +You can easily call your own functions by wrapping them in a `Tool`. You can also add memory to the agent by passing it when creating the agent. + +Let's see an example of how to do this. + +First, let's create a class that will be the input for our tool. + +```dart +@immutable +class SearchInput { + const SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); +} +``` + +Now let's define the tool: + +```dart +final searchTool = Tool.fromFunction( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: callYourSearchFunction, + getInputFromJson: SearchInput.fromJson, +); +``` + +Notice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined. + +The tool will call `callYourSearchFunction` function with the parsed input. For simplicity, we will just mock the search function. 
+```dart +String callYourSearchFunction(final SearchInput input) { + final n = input.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; +} +``` + +Now we can create the agent and run it: + +```dart +final llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3-groq-tool-use', + temperature: 0, + ), +); + +final memory = ConversationBufferMemory(returnMessages: true); +final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + tools: [searchTool], + memory: memory, +); + +final executor = AgentExecutor(agent: agent); + +final res1 = await executor.run( + 'Search for cat names. Return only 3 results.', +); +print(res1); +// Here are the top 3 cat names I found: AAA, BBB, and CCC. +``` + +## Custom agent using LangChain Expression Language (LCEL) + +You can replicate the functionality of the Tools Agent by using the LangChain Expression Language (LCEL) directly. + +```dart +final openAiKey = Platform.environment['OPENAI_API_KEY']; + +final prompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), +]); + +final tool = CalculatorTool(); + +final model = ChatOpenAI( + apiKey: openAiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o-mini', + temperature: 0, + tools: [tool], + ), +); + +const outputParser = ToolsAgentOutputParser(); + +List buildScratchpad(final List intermediateSteps) { + return intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false); +} + +final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), + }, + ).pipe(prompt).pipe(model).pipe(outputParser), + tools: [tool], +); +final executor = AgentExecutor(agent: agent); + +final res = await executor.invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', +}); +print(res['output']); +// The result of 40 raised to the power of 0.43 is approximately 4.885. +``` + +In this way, you can create your own custom agents with full control over their behavior, while still leveraging the flexibility of the Tools Agent to work with various language models and tools. diff --git a/docs/modules/agents/agents.md b/docs/modules/agents/agents.md index ab56353c..78004d19 100644 --- a/docs/modules/agents/agents.md +++ b/docs/modules/agents/agents.md @@ -75,7 +75,7 @@ First, let's load the language model we're going to use to control the agent. ```dart final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + defaultOptions: ChatOpenAIOptions(temperature: 0), ); ``` @@ -91,7 +91,7 @@ Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. ```dart -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); ``` Now let's create the agent executor and test it out! 
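A minimal sketch of that last step, mirroring the executor pattern used in the other agent examples in these docs (the query is illustrative):

```dart
// Sketch: wrap the agent in an executor and run a query against it.
final executor = AgentExecutor(agent: agent);
final res = await executor.run('What is 40 raised to the 0.43 power?');
print(res); // prints the agent's final answer
```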
diff --git a/docs/modules/agents/tools/calculator.md b/docs/modules/agents/tools/calculator.md index 0847f2eb..fe9f127c 100644 --- a/docs/modules/agents/tools/calculator.md +++ b/docs/modules/agents/tools/calculator.md @@ -14,7 +14,7 @@ final llm = ChatOpenAI( ), ); final tool = CalculatorTool(); -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? '); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/docs/modules/agents/tools/openai_dall_e.md b/docs/modules/agents/tools/openai_dall_e.md index 9d30914b..426f4d89 100644 --- a/docs/modules/agents/tools/openai_dall_e.md +++ b/docs/modules/agents/tools/openai_dall_e.md @@ -18,7 +18,7 @@ final tools = [ CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; -final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. ' diff --git a/docs/modules/model_io/models/chat_models/chat_models.md b/docs/modules/model_io/models/chat_models/chat_models.md index 5aabfd23..e191707b 100644 --- a/docs/modules/model_io/models/chat_models/chat_models.md +++ b/docs/modules/model_io/models/chat_models/chat_models.md @@ -93,5 +93,5 @@ print(chatRes1.generations); print(chatRes1.usage?.totalTokens); // -> 36 print(chatRes1.modelOutput); -// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-3.5-turbo} +// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-4o-mini} ``` diff --git a/docs/modules/model_io/models/chat_models/how_to/tools.md b/docs/modules/model_io/models/chat_models/how_to/tools.md index 16c12081..11bf5f3e 100644 --- a/docs/modules/model_io/models/chat_models/how_to/tools.md +++ b/docs/modules/model_io/models/chat_models/how_to/tools.md @@ -3,9 +3,11 @@ > We use the term "tool calling" interchangeably with "function calling". Although function calling is sometimes meant to refer to invocations of a single function, we treat all models as though they can return multiple tool or function calls in each message. > Tool calling is currently supported by: -> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) +> - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md) > - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md) > - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md) +> - [`ChatOllama`](/modules/model_io/models/chat_models/integrations/ollama.md) +> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md) Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. While the name implies that the model is performing some action, this is actually not the case! 
The model is coming up with the arguments to a tool, and actually running the tool (or not) is up to the user - for example, if you want to extract output matching some schema from unstructured text, you could give the model an “extraction” tool that takes parameters matching the desired schema, then treat the generated output as your final result. diff --git a/docs/modules/model_io/models/chat_models/integrations/anthropic.md b/docs/modules/model_io/models/chat_models/integrations/anthropic.md new file mode 100644 index 00000000..b607ddc7 --- /dev/null +++ b/docs/modules/model_io/models/chat_models/integrations/anthropic.md @@ -0,0 +1,145 @@ +# ChatAnthropic + +Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). + +## Setup + +The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. + +The following models are available: +- `claude-3-5-sonnet-20240620` +- `claude-3-haiku-20240307` +- `claude-3-opus-20240229` +- `claude-3-sonnet-20240229` +- `claude-2.0` +- `claude-2.1` + +Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); + +print(res.output.content); +// -> 'The fruit in the image is an apple.' +``` + +## Streaming + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 123 +// 456789101 +// 112131415161 +// 718192021222 +// 324252627282 +// 930 +``` + +## Tool calling + +`ChatAnthropic` supports tool calling. 
+ +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + tools: [tool], + ), +); + +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md index 8dc05345..167ffe13 100644 --- a/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md +++ b/docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md @@ -66,31 +66,31 @@ print(res); ## Available models The following models are available: -- `gemini-1.0-pro` - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 -- `gemini-1.0-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.5-pro-preview-0514`: +- `gemini-1.5-flash`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-flash-preview-0514`: +- `gemini-1.5-pro`: * text / image / audio -> text model - * Max input token: 1048576 + * Max input token: 2097152 + * Max output tokens: 8192 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 16384 + * Max output tokens: 2048 +- `gemini-1.0-pro` + * text -> text model + * Max input token: 32760 * Max output tokens: 8192 -Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) for the updated list. ## Multimodal support ```dart final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', ), ); final res = await chatModel.invoke( @@ -122,7 +122,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', ), ); @@ -140,7 +140,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. 
Example: ```dart @@ -160,7 +160,7 @@ const tool = ToolSpec( ); final chatModel = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', temperature: 0, tools: [tool], ), diff --git a/docs/modules/model_io/models/chat_models/integrations/googleai.md b/docs/modules/model_io/models/chat_models/integrations/googleai.md index 6eca8777..12ff5f2c 100644 --- a/docs/modules/model_io/models/chat_models/integrations/googleai.md +++ b/docs/modules/model_io/models/chat_models/integrations/googleai.md @@ -7,24 +7,24 @@ Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemin To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). The following models are available: -- `gemini-1.0-pro` (or `gemini-pro`): - * text -> text model - * Max input token: 30720 - * Max output tokens: 2048 -- `gemini-pro-vision`: - * text / image -> text model - * Max input token: 12288 - * Max output tokens: 4096 -- `gemini-1.5-pro-latest`: text / image -> text model +- `gemini-1.5-flash`: * text / image / audio -> text model * Max input token: 1048576 * Max output tokens: 8192 -- `gemini-1.5-flash-latest`: +- `gemini-1.5-pro`: * text / image / audio -> text model - * Max input token: 1048576 + * Max input token: 2097152 * Max output tokens: 8192 +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 32760 + * Max output tokens: 8192 +- `aqa`: + * text -> text model + * Max input token: 7168 + * Max output tokens: 1024 -Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) for the updated list. ## Usage @@ -34,7 +34,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -63,7 +63,7 @@ final apiKey = Platform.environment['GOOGLEAI_API_KEY']; final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -99,7 +99,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ final chatModel = ChatGoogleGenerativeAI( apiKey: apiKey, defaultOptions: const ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, ), ); @@ -118,7 +118,7 @@ await stream.forEach(print); `ChatGoogleGenerativeAI` supports tool calling. -Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. 
Example: ```dart @@ -138,7 +138,7 @@ const tool = ToolSpec( ); final chatModel = ChatGoogleGenerativeAI( defaultOptions: ChatGoogleGenerativeAIOptions( - model: 'gemini-1.5-pro-latest', + model: 'gemini-1.5-pro', temperature: 0, tools: [tool], ), diff --git a/docs/modules/model_io/models/chat_models/integrations/ollama.md b/docs/modules/model_io/models/chat_models/integrations/ollama.md index 37110289..f612616e 100644 --- a/docs/modules/model_io/models/chat_models/integrations/ollama.md +++ b/docs/modules/model_io/models/chat_models/integrations/ollama.md @@ -2,13 +2,9 @@ Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. -Ollama allows you to run open-source large language models, such as Llama 3, locally. +Ollama allows you to run open-source large language models, such as Llama 3.2 or Gemma 2, locally. -Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. - -It optimizes setup and configuration details, including GPU usage. - -For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). +Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. ## Setup @@ -16,7 +12,31 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3` + * e.g., for Llama 3: `ollama pull llama3.2` +3. Instantiate the `ChatOllama` class with the downloaded model. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.2', + ), +); +``` + +For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). + +### Ollama base URL + +By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + baseUrl: 'https://your-remote-server-where-ollama-is-running.com', + model: 'llama3.2', + ), +); +``` ## Usage @@ -28,7 +48,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ final chatModel = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.2', temperature: 0, ), ); @@ -44,7 +64,9 @@ print(res); // -> 'La traduction est : "J'aime le programming.' ``` -## Streaming +### Streaming + +Ollama supports streaming the output as the model generates it. ```dart final promptTemplate = ChatPromptTemplate.fromTemplates([ @@ -53,7 +75,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.2', temperature: 0, ), ); @@ -61,14 +83,107 @@ final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); final stream = chain.stream({'max_num': '9'}); await stream.forEach(print); -// 1 -// 2 -// 3 -// .. -// 9 +// 123 +// 456 +// 789 +``` + +### Multimodal support + +Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). + +You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. 
+ +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llava', + temperature: 0, + ), +); +final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), +); +final res = await chatModel.invoke(PromptValue.chat([prompt])); +print(res.output.content); +// -> 'An Apple' +``` + +### Tool calling + +`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). + +**Notes:** +- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.2` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.2:70b` or `llama3.2:405b`. + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.2', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] ``` -## JSON mode +As you can see, `ChatOllama` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.2', + temperature: 0, + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). 
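As a rough illustration of that idea (a minimal sketch only; the example exchange below is made up, and the linked docs describe a more complete approach based on example tool-call messages), you can prepend a worked example to the prompt so the model learns the expected argument format:

```dart
// Hypothetical few-shot sketch, assuming `chatModel` is the tool-enabled
// ChatOllama instance defined above. The example turn shows the model the
// expected "City, Country code" format before asking the real question.
final res = await chatModel.invoke(
  PromptValue.chat([
    ChatMessage.system(
      'When calling get_current_weather, always pass the location as '
      '"City, Country code", e.g. "Paris, FR".',
    ),
    ChatMessage.humanText('What is the weather in Berlin?'),
    ChatMessage.ai('Calling get_current_weather with {location: Berlin, DE}'),
    ChatMessage.humanText('What is the weather like in Boston right now?'),
  ]),
);
print(res.output.toolCalls);
```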
+ +### JSON mode You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. @@ -79,7 +194,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ]); final chat = ChatOllama( defaultOptions: ChatOllamaOptions( - model: 'llama3', + model: 'llama3.2', temperature: 0, format: OllamaResponseFormat.json, ), @@ -97,42 +212,214 @@ print(res); // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} ``` -## Multimodal support +## Examples -Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). +### Answering questions with data from an external API -You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. +Imagine you have an API that provides flight times between two cities: + +```dart +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); +} +``` + +Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. ```dart +const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, +); + final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions( - model: 'llava', + defaultOptions: const ChatOllamaOptions( + model: 'llama3.2', temperature: 0, + tools: [getFlightTimesTool], ), ); -final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), + +final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), +]; + +// First API call: Send the query and function description to the model +final response = await chatModel.invoke(PromptValue.chat(messages)); + +messages.add(response.output); + +// Check if the model decided to use the provided function +if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. 
Its response was:"); + print(response.output.content); + return; +} + +// Process function calls made by the model +for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, ), - ]), + ); +} + +// Second API call: Get final response from the model +final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); +print(finalResponse.output.content); +// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. +``` + +### Extracting structured data with tools + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. + +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, ); -final res = await chatModel.invoke(PromptValue.chat([prompt])); -print(res.output.content); -// -> 'An Apple' + +final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + options: ChatOllamaOptions( + model: 'llama3.2', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. ' + 'Claudia has orange hair and Alex is blonde.', +); +final extractedData = res.first.arguments; +print(extractedData); +// { +// people: [ +// { +// name: Alex, +// height: 152, +// hair_color: blonde +// }, +// { +// name: Claudia, +// height: 183, +// hair_color: orange +// } +// ] +// } ``` -## RAG (Retrieval-Augmented Generation) pipeline +### RAG (Retrieval-Augmented Generation) pipeline We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. ```dart // 1. Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3'), + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), ); await vectorStore.addDocuments( documents: [ @@ -149,7 +436,7 @@ final promptTemplate = ChatPromptTemplate.fromTemplates([ // 3. 
Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: ChatOllamaOptions(model: 'llama3'), + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever( defaultOptions: VectorStoreRetrieverOptions( diff --git a/docs/modules/model_io/models/chat_models/integrations/open_router.md b/docs/modules/model_io/models/chat_models/integrations/open_router.md index e747ca5f..c2d63555 100644 --- a/docs/modules/model_io/models/chat_models/integrations/open_router.md +++ b/docs/modules/model_io/models/chat_models/integrations/open_router.md @@ -95,3 +95,63 @@ await stream.forEach(print); // 123 // 456789 ``` + +## Tool calling + +OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser' which tries to "auto-complete" the partial json from each chunk into a valid state. + +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; +const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, +); +final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', +); +final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), +); +final outputParser = ToolsOutputParser(); + +final chain = promptTemplate.pipe(chat).pipe(outputParser); + +final stream = chain.stream({'foo': 'bears'}); +await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); +} +// {} +// {setup: } +// {setup: Why don't} +// {setup: Why don't bears} +// {setup: Why don't bears like fast food} +// {setup: Why don't bears like fast food?, punchline: } +// {setup: Why don't bears like fast food?, punchline: Because} +// {setup: Why don't bears like fast food?, punchline: Because they can't} +// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +``` diff --git a/docs/modules/model_io/models/chat_models/integrations/openai.md b/docs/modules/model_io/models/chat_models/integrations/openai.md index df92b348..6b3ccbbc 100644 --- a/docs/modules/model_io/models/chat_models/integrations/openai.md +++ b/docs/modules/model_io/models/chat_models/integrations/openai.md @@ -1,25 +1,78 @@ # OpenAI -[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of -chat models with different levels of power suitable for different tasks. +This notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html). -This example goes over how to use LangChain to interact with -OpenAI [models](https://platform.openai.com/docs/models) using the Chat API. +OpenAI has several chat models. 
You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models). + +> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use the Azure with `ChatOpenAI`. + +## Setup + +To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package. + +### Credentials + +Head to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys). + +### Installation + +The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package: + +```yaml +dart pub add langchain_openai +``` + +## Usage + +### Instantiation + +Now we can instantiate our model object and generate chat completions: ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; -final promptTemplate = ChatPromptTemplate.fromTemplates([ - (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), - (ChatMessageType.human, '{text}'), -]); - final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o' temperature: 0, + // ...other options ), ); +``` + +If you are using a proxy, you can override the base URL, headers, and other options: + +```dart +final client = ChatOpenAI( + baseUrl: 'https://my-proxy.com', + headers: {'x-my-proxy-header': 'value'}, +); +``` + +### Invocation + +Now you can generate completions by calling the `invoke` method: + +```dart +final messages = [ + ChatMessage.system('You are a helpful assistant that translates English to French.'), + ChatMessage.humanText('I love programming.'), +]; +final prompt = PromptValue.chat(messages); +final res = await llm.invoke(prompt); +// -> 'J'adore la programmation.' +``` + +### Chaining + +We can chain our model with a prompt template or output parser to create a more complex pipeline: + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, '{text}'), +]); final chain = promptTemplate | chatModel | StringOutputParser(); @@ -32,15 +85,16 @@ print(res); // -> 'J'adore la programmation.' ``` -## Streaming +### Streaming + +OpenAI models support [streaming](/expression_language/streaming.md) the output of th model as it is generated. ```dart final openaiApiKey = Platform.environment['OPENAI_API_KEY']; final promptTemplate = ChatPromptTemplate.fromTemplates([ - ( - ChatMessageType.system, - 'You are a helpful assistant that replies only with numbers ' + (ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' 'in order without any spaces or commas', ), (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), @@ -57,7 +111,91 @@ await stream.forEach(print); // 789 ``` -You can also stream OpenAI tool calls: +### Multimodal support + +OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them. 
+
+You can send the image as a base64-encoded string:
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system('You are a helpful assistant.'),
+  ChatMessage.human(
+    ChatMessageContent.multiModal([
+      ChatMessageContent.text('What fruit is this?'),
+      ChatMessageContent.image(
+        mimeType: 'image/jpeg',
+        data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image
+      ),
+    ]),
+  ),
+]);
+```
+
+Or you can send the URL where the image is hosted:
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system('You are a helpful assistant.'),
+  ChatMessage.human(
+    ChatMessageContent.multiModal([
+      ChatMessageContent.text('What fruit is this?'),
+      ChatMessageContent.image(
+        data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg',
+      ),
+    ]),
+  ),
+]);
+```
+
+### Tool calling
+
+OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) (we use "tool calling" and "function calling" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.
+
+```dart
+const tool = ToolSpec(
+  name: 'get_current_weather',
+  description: 'Get the current weather in a given location',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'location': {
+        'type': 'string',
+        'description': 'The city and country, e.g. San Francisco, US',
+      },
+    },
+    'required': ['location'],
+  },
+);
+
+final chatModel = ChatOpenAI(
+  apiKey: openaiApiKey,
+  defaultOptions: ChatOpenAIOptions(
+    model: 'gpt-4o',
+    temperature: 0,
+    tools: [tool],
+  ),
+);
+
+final res = await chatModel.invoke(
+  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),
+);
+print(res.output.toolCalls);
+// [AIChatMessageToolCall{
+//   id: a621064b-03b3-4ca6-8278-f37504901034,
+//   name: get_current_weather,
+//   arguments: {location: Boston, US},
+// },
+// AIChatMessageToolCall{
+//   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,
+//   name: get_current_weather,
+//   arguments: {location: Madrid, ES},
+// }]
+```
+
+Notice that the returned `AIChatMessage` has a `toolCalls` field. These tool calls are returned in a standardized format that is model-provider agnostic.
+
+You can also stream OpenAI tool calls. `ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one:
 ```dart
 const tool = ToolSpec(
@@ -108,9 +246,76 @@ await for (final chunk in stream) {
 // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
 ```
-## JSON mode
+### Structured Outputs
+
+[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value.
+
+```dart
+final prompt = PromptValue.chat([
+  ChatMessage.system(
+    'Extract the data of any companies mentioned in the '
+    'following statement.
Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), +]); +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + temperature: 0, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), + ), + ), +); + +final res = await chatModel.invoke(prompt); +// { +// "companies": [ +// { +// "name": "Google", +// "origin": "USA" +// }, +// { +// "name": "Deepmind", +// "origin": "UK" +// } +// ] +// } +``` + +When you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only support a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under-the-hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. + +### JSON mode -GPT-4 Turbo supports a new JSON mode, which ensures the model will respond with valid JSON. JSON mode is useful for developers generating JSON in the Chat Completions API outside of function calling. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. +When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. ```dart final prompt = PromptValue.chat([ @@ -127,9 +332,7 @@ final llm = ChatOpenAI( defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final chain = llm.pipe(JsonOutputParser()); @@ -148,3 +351,22 @@ print(res); // ] // } ``` + +### Fine-tuning + +You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. + +This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. 
For example: + +```dart +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' + ), +); +``` + +## API reference + +For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). diff --git a/docs/modules/model_io/models/llms/integrations/ollama.md b/docs/modules/model_io/models/llms/integrations/ollama.md index 3a90917c..25f6806e 100644 --- a/docs/modules/model_io/models/llms/integrations/ollama.md +++ b/docs/modules/model_io/models/llms/integrations/ollama.md @@ -16,7 +16,7 @@ Follow [these instructions](https://github.com/jmorganca/ollama) to set up and r 1. Download and install [Ollama](https://ollama.ai) 2. Fetch a model via `ollama pull ` - * e.g., for Llama 3: `ollama pull llama3` + * e.g., for Llama 3: `ollama pull llama3.2` ## Usage @@ -26,7 +26,7 @@ final prompt = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3', + model: 'llama3.2', ), ); final chain = prompt | llm | StringOutputParser(); @@ -43,7 +43,7 @@ final promptTemplate = PromptTemplate.fromTemplate( ); final llm = Ollama( defaultOptions: OllamaOptions( - model: 'llama3', + model: 'llama3.2', ), ); final chain = promptTemplate | llm | StringOutputParser(); diff --git a/docs/modules/model_io/output_parsers/json.md b/docs/modules/model_io/output_parsers/json.md index 905b380b..06451f17 100644 --- a/docs/modules/model_io/output_parsers/json.md +++ b/docs/modules/model_io/output_parsers/json.md @@ -21,9 +21,7 @@ final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/docs/modules/retrieval/text_embedding/integrations/google_ai.md b/docs/modules/retrieval/text_embedding/integrations/google_ai.md index 6d84e8a1..657d7f6d 100644 --- a/docs/modules/retrieval/text_embedding/integrations/google_ai.md +++ b/docs/modules/retrieval/text_embedding/integrations/google_ai.md @@ -6,8 +6,6 @@ The embedding service in the [Gemini API](https://ai.google.dev/docs/embeddings_ - `text-embedding-004` * Dimensions: 768 (with support for reduced dimensionality) -- `embedding-001` - * Dimensions: 768 The previous list of models may not be exhaustive or up-to-date. Check out the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of available models. 
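
As a quick illustration, a minimal sketch of calling this embedding model through the `langchain_google` package might look like the following (the `GoogleGenerativeAIEmbeddings` class name, its parameters, and the `googleApiKey` variable are assumptions here; check the package docs for the exact API):

```dart
// Minimal sketch (assumes `googleApiKey` holds a valid Google AI API key).
final embeddings = GoogleGenerativeAIEmbeddings(
  apiKey: googleApiKey,
  model: 'text-embedding-004',
);

const text = 'The quick brown fox jumps over the lazy dog.';
final vector = await embeddings.embedQuery(text);
print(vector.length); // 768
```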
diff --git a/docs/modules/retrieval/text_embedding/integrations/ollama.md b/docs/modules/retrieval/text_embedding/integrations/ollama.md index 395b1203..b13ddd28 100644 --- a/docs/modules/retrieval/text_embedding/integrations/ollama.md +++ b/docs/modules/retrieval/text_embedding/integrations/ollama.md @@ -1,7 +1,7 @@ # OllamaEmbeddings ```dart -final embeddings = OllamaEmbeddings(model: 'llama3'); +final embeddings = OllamaEmbeddings(model: 'llama3.2'); const text = 'This is a test document.'; final res = await embeddings.embedQuery(text); final res = await embeddings.embedDocuments([text]); diff --git a/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png b/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png new file mode 100644 index 00000000..6d88c06f Binary files /dev/null and b/docs/modules/retrieval/vector_stores/integrations/img/objectbox.png differ diff --git a/docs/modules/retrieval/vector_stores/integrations/memory.md b/docs/modules/retrieval/vector_stores/integrations/memory.md index 58f3aacb..7acb37cf 100644 --- a/docs/modules/retrieval/vector_stores/integrations/memory.md +++ b/docs/modules/retrieval/vector_stores/integrations/memory.md @@ -1,8 +1,8 @@ # MemoryVectorStore -`MemoryVectorStore` is an in-memory, ephemeral vector store that stores -embeddings in-memory and does an exact, linear search for the most similar -embeddings. The default similarity metric is cosine similarity. +`MemoryVectorStore` is an in-memory, ephemeral vector store that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity. + +This class is useful for testing and prototyping, but it is not recommended for production use cases. See other vector store integrations for production use cases. ```dart const filePath = './test/chains/assets/state_of_the_union.txt'; @@ -30,7 +30,7 @@ final docSearch = await MemoryVectorStore.fromDocuments( ); final llm = ChatOpenAI( apiKey: openAiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + defaultOptions: ChatOpenAIOptions(temperature: 0), ); final qaChain = OpenAIQAWithSourcesChain(llm: llm); final docPrompt = PromptTemplate.fromTemplate( diff --git a/docs/modules/retrieval/vector_stores/integrations/objectbox.md b/docs/modules/retrieval/vector_stores/integrations/objectbox.md new file mode 100644 index 00000000..08a07bc2 --- /dev/null +++ b/docs/modules/retrieval/vector_stores/integrations/objectbox.md @@ -0,0 +1,346 @@ +# ObjectBox + +Vector store for the [ObjectBox](https://objectbox.io/) on-device database. + +ObjectBox features: +- Embedded Database that runs inside your application without latency +- Vector search based is state-of-the-art HNSW algorithm that scales very well with growing data volume +- HNSW is tightly integrated within ObjectBox's internal database. Vector Search doesn’t just run “on top of database persistence” +- With this deep integration ObjectBox does not need to keep all vectors in memory +- Multi-layered caching: if a vector is not in-memory, ObjectBox fetches it from disk +- Not just a vector database: you can store any data in ObjectBox, not just vectors. You won’t need a second database +- Low minimum hardware requirements: e.g. an old Raspberry Pi comfortably runs ObjectBox smoothly +- Low memory footprint: ObjectBox itself just takes a few MB of memory. 
The entire binary is only about 3 MB (compressed around 1 MB) +- Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers + +Official ObjectBox resources: +- [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) +- [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0) +- [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter) + +## Setup + +LangChain.dart offers two classes for working with ObjectBox: +- `ObjectBoxVectorStore`: This vector stores creates a `Store` with an `ObjectBoxDocument` entity that persists LangChain `Document`s along with their embeddings. +- `BaseObjectBoxVectorStore`: If you need more control over the entity (e.g. if you need to persist custom fields), you can use this class instead. + +### 1. Add ObjectBox to your project + +See the [ObjectBox documentation](https://docs.objectbox.io/getting-started) to learn how to add ObjectBox to your project. + +Note that the integration differs depending on whether you are building a Flutter application or a pure Dart application. + +### 2. Add the LangChain.dart Community package + +Add the `langchain_community` package to your `pubspec.yaml` file. + +```yaml +dependencies: + langchain: {version} + langchain_community: {version} +``` + +### 3. Instantiate the ObjectBox vector store + +```dart +final embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'); +final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 512, +); +``` + +The dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) model, which has 512 dimensions. + +The `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. For example, you can change the directory where the database is stored: + +```dart +final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 512, + directory: 'path/to/db', +); +``` + +## Usage + +### Storing vectors + +```dart +final res = await vectorStore.addDocuments( + documents: [ + Document( + pageContent: 'The cat sat on the mat', + metadata: {'cat': 'animal'}, + ), + Document( + pageContent: 'The dog chased the ball.', + metadata: {'cat': 'animal'}, + ), + ], +); +``` + +### Querying vectors + +```dart +final res = await vectorStore.similaritySearch( + query: 'Where is the cat?', + config: const ObjectBoxSimilaritySearch(k: 1), +); +``` + +You can change the minimum similarity score threshold by setting the `scoreThreshold` parameter in the `ObjectBoxSimilaritySearch` config object. + +#### Filtering + +You can use the `ObjectBoxSimilaritySearch` class to pass ObjectBox-specific filtering options. + +`ObjectBoxVectorStore` supports filtering queries by id, content or metadata using ObjectBox's `Condition`. You can define the filter condition in the `ObjectBoxSimilaritySearch.filterCondition` parameter. Use the `ObjectBoxDocumentProps` class to reference the entity fields to use in the query. 
+ +For example: +```dart +final res = await vectorStore.similaritySearch( + query: 'What should I feed my cat?', + config: ObjectBoxSimilaritySearch( + k: 5, + scoreThreshold: 0.8, + filterCondition: ObjectBoxDocumentProps.id.equals('my-id') + .or(ObjectBoxDocumentProps.metadata.contains('some-text')), + ), +); +``` + +### Deleting vectors + +To delete documents, you can use the `delete` method passing the ids of the documents you want to delete. + +```dart +await vectorStore.delete(ids: ['9999']); +``` + +You can also use `deleteWhere` to delete documents based on a condition. + +```dart +await vectorStore.deleteWhere( + ObjectBoxDocumentProps.metadata.contains('cat'), +); +``` + +## Example: Building a Fully Local RAG App with ObjectBox and Ollama + +This example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions. + +![RAG Pipeline](img/objectbox.png) + +#### Prerequisites + +Before running the example, make sure you have the following: + +- Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions). +- [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) and [llama3:8b](https://ollama.com/library/llama3:8b) models downloaded. + +#### Steps + +**Step 1: Retrieving and Storing Documents** + +1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader. +2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`. +3. Create embeddings from the document chunks using the `jina/jina-embeddings-v2-small-en` embeddings model via `OllamaEmbeddings`. +4. Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`. + +> Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries. + +**Step 2: Constructing the RAG Pipeline** + +1. Set up a retrieval pipeline that takes a user question as input and retrieves the most relevant documents from the ObjectBox vector store. +2. Format the retrieved documents into a single string containing the source, title, and content of each document. +3. Pass the formatted string to the Llama 3 model to generate a response to the user question. + +```dart +// 1. Instantiate vector store +final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + dimensions: 512, +); + +// 2. Load documents +const loader = WebBaseLoader([ + 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', + 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', + 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', + 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', +]); +final List docs = await loader.load(); + +// 3. Split docs into chunks +const splitter = RecursiveCharacterTextSplitter( + chunkSize: 500, + chunkOverlap: 0, +); +final List chunkedDocs = await splitter.invoke(docs); + +// 4. Add documents to vector store +await vectorStore.addDocuments(documents: chunkedDocs); + +// 5. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, + ''' +You are an assistant for question-answering tasks. 
+ +Use the following pieces of retrieved context to answer the user question. + +Context: +{context} + +If you don't know the answer, just say that you don't know. +Use three sentences maximum and keep the answer concise. +Cite the source you used to answer the question. + +Example: +""" +One sentence [1]. Another sentence [2]. + +Sources: +[1] https://example.com/1 +[2] https://example.com/2 +""" +''' + ), + (ChatMessageType.human, '{question}'), +]); + +// 6. Define the model to use and the vector store retriever +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions(model: 'llama3.2'), +); +final retriever = vectorStore.asRetriever(); + +// 7. Create a Runnable that combines the retrieved documents into a single formatted string +final docCombiner = Runnable.mapInput, String>((docs) { + return docs.map((d) => ''' +Source: ${d.metadata['source']} +Title: ${d.metadata['title']} +Content: ${d.pageContent} +--- +''').join('\n'); +}); + +// 8. Define the RAG pipeline +final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), +}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser()); + +// 9. Run the pipeline +final stream = chain.stream( + 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', +); +await stream.forEach(stdout.write); +// According to the sources provided, ObjectBox Vector Search uses the HNSW +// (Hierarchical Navigable Small World) algorithm [1]. +// +// And yes, you can use it in Flutter apps. The article specifically mentions +// that ObjectBox 4.0 introduces an on-device vector database for the +// Dart/Flutter platform [2]. +// +// Sources: +// [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ +// [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ +``` + +## Example: Wikivoyage EU + +Check out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_dart/tree/main/examples/wikivoyage_eu), to see how to build a fully local chatbot that uses RAG to plan vacation plans in Europe. + +## Advance + +### BaseObjectBoxVectorStore + +If you need more control over the entity (e.g. if you are using ObjectBox to store other entities, or if you need to customize the Document entity class.), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`. + +`BaseObjectBoxVectorStore` requires the following parameters: +- `embeddings`: The embeddings model to use. +- `box`: The ObjectBox `Box` instance to use. +- `createEntity`: A function that creates an entity from the given data. +- `createDocument`: A function that creates a LangChain's `Document` from the given entity. +- `getIdProperty`: A function that returns the ID property of the entity. +- `getEmbeddingProperty`: A function that returns the embedding property of the entity. + +Here is an example of how to use this class: + +First, you can define our own Document entity class instead of using the one provided by the [ObjectBoxVectorStore]. In this way, you can customize the entity to your needs. You will need to define the mapping logic between the entity and the LangChain [Document] model. 
+ +```dart +@Entity() +class MyDocumentEntity { + MyDocumentEntity({ + required this.id, + required this.content, + required this.metadata, + required this.embedding, + }); + @Id() + int internalId = 0; + @Unique(onConflict: ConflictStrategy.replace) + String id; + String content; + String metadata; + @HnswIndex( + dimensions: 768, + distanceType: VectorDistanceType.cosine, + ) + @Property(type: PropertyType.floatVector) + List embedding; + factory MyDocumentEntity.fromModel( + Document doc, List embedding, + ) => MyDocumentEntity( + id: doc.id ?? '', + content: doc.pageContent, + metadata: jsonEncode(doc.metadata), + embedding: embedding, + ); + Document toModel() => Document( + id: id, + pageContent: content, + metadata: jsonDecode(metadata), + ); +} +``` + +After defining the entity class, you will need to run the ObjectBox generator: + +```sh +dart run build_runner build --delete-conflicting-outputs +``` + +Then, you just need to create your custom vector store class that extends [BaseObjectBoxVectorStore] and wire everything up: + +```dart +class MyCustomVectorStore extends BaseObjectBoxVectorStore { + MyCustomVectorStore({ + required super.embeddings, + required Store store, + }) : super( + box: store.box(), + createEntity: ( + String id, + String content, + String metadata, + List embedding, + ) => + MyDocumentEntity( + id: id, + content: content, + metadata: metadata, + embedding: embedding, + ), + createDocument: (MyDocumentEntity docDto) => docDto.toModel(), + getIdProperty: () => MyDocumentEntity_.id, + getEmbeddingProperty: () => MyDocumentEntity_.embedding, + ); +} +``` + +Now you can use the [MyCustomVectorStore] class to store and search documents. diff --git a/docs_v2/.firebaserc b/docs_v2/.firebaserc new file mode 100644 index 00000000..15e3b72b --- /dev/null +++ b/docs_v2/.firebaserc @@ -0,0 +1,5 @@ +{ + "projects": { + "default": "langchain-dart" + } +} diff --git a/docs_v2/.gitignore b/docs_v2/.gitignore new file mode 100644 index 00000000..0f21febf --- /dev/null +++ b/docs_v2/.gitignore @@ -0,0 +1,21 @@ +# Dependencies +/node_modules + +# Production +/build + +# Generated files +.docusaurus +.cache-loader + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.firebase diff --git a/docs_v2/README.md b/docs_v2/README.md new file mode 100644 index 00000000..0c6c2c27 --- /dev/null +++ b/docs_v2/README.md @@ -0,0 +1,41 @@ +# Website + +This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. + +### Installation + +``` +$ yarn +``` + +### Local Development + +``` +$ yarn start +``` + +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. + +### Build + +``` +$ yarn build +``` + +This command generates static content into the `build` directory and can be served using any static contents hosting service. + +### Deployment + +Using SSH: + +``` +$ USE_SSH=true yarn deploy +``` + +Not using SSH: + +``` +$ GIT_USER= yarn deploy +``` + +If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
diff --git a/docs_v2/babel.config.js b/docs_v2/babel.config.js new file mode 100644 index 00000000..e00595da --- /dev/null +++ b/docs_v2/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; diff --git a/docs_v2/docs/01-intro.md b/docs_v2/docs/01-intro.md new file mode 100644 index 00000000..75428706 --- /dev/null +++ b/docs_v2/docs/01-intro.md @@ -0,0 +1,171 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + +# Introduction + +Build Dart/Flutter applications powered by Large Language Models. + +## What is LangChain.dart? + +LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). LangChain is a framework for developing applications that are powered by large language models (LLMs). + +It comes with a set of components that make working with LLMs easy. +The components can be grouped into a few core modules: + +![LangChain.dart](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain.dart.png) + +- 📃 **Model I/O:** LangChain offers a unified API for interacting with various LLM providers (e.g. OpenAI, Google, Mistral, Ollama, etc.), allowing developers to switch between them with ease. Additionally, it provides tools for managing model inputs (prompt templates and example selectors) and parsing the resulting model outputs (output parsers). +- 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). +- 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. + +The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). + +## Motivation + +Large Language Models (LLMs) have revolutionized Natural Language Processing (NLP), serving as essential components in a wide range of applications, such as question-answering, summarization, translation, and text generation. + +The adoption of LLMs is creating a new tech stack in its wake. However, emerging libraries and tools are predominantly being developed for the Python and JavaScript ecosystems. As a result, the number of applications leveraging LLMs in these ecosystems has grown exponentially. + +In contrast, the Dart / Flutter ecosystem has not experienced similar growth, which can likely be attributed to the scarcity of Dart and Flutter libraries that streamline the complexities associated with working with LLMs. + +LangChain.dart aims to fill this gap by abstracting the intricacies of working with LLMs in Dart and Flutter, enabling developers to harness their combined potential effectively. + +## Packages + +LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: + +### [`langchain_core`](https://pub.dev/packages/langchain_core) + +Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. 
+ +> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. + +### [`langchain`](https://pub.dev/packages/langchain) + +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with LangChain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +### [`langchain_community`](https://pub.dev/packages/langchain_community) + +Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. + +> Depend on this package if you want to use any of the integrations or components it provides. + +### Integration-specific packages + +Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. + +> Depend on an integration-specific package if you want to use the specific integration. + + +## Getting started + +To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): + +```yaml +dependencies: + langchain: {version} + langchain_community: {version} + langchain_openai: {version} + langchain_google: {version} + ... +``` + +The most basic building block of LangChain.dart is calling an LLM on some prompt. LangChain.dart provides a unified interface for calling different LLMs. For example, we can use `ChatGoogleGenerativeAI` to call Google's Gemini model: + +```dart +final model = ChatGoogleGenerativeAI(apiKey: googleApiKey); +final prompt = PromptValue.string('Hello world!'); +final result = await model.invoke(prompt); +// Hello everyone! I'm new here and excited to be part of this community. +``` + +But the power of LangChain.dart comes from chaining together multiple components to implement complex use cases. For example, a RAG (Retrieval-Augmented Generation) pipeline that would accept a user query, retrieve relevant documents from a vector store, format them using prompt templates, invoke the model, and parse the output: + +```dart +// 1. Create a vector store and add documents to it +final vectorStore = MemoryVectorStore( + embeddings: OpenAIEmbeddings(apiKey: openaiApiKey), +); +await vectorStore.addDocuments( + documents: [ + Document(pageContent: 'LangChain was created by Harrison'), + Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'), + ], +); + +// 2. Define the retrieval chain +final retriever = vectorStore.asRetriever(); +final setupAndRetrieval = Runnable.fromMap({ + 'context': retriever.pipe( + Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\n')), + ), + 'question': Runnable.passthrough(), +}); + +// 3. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'), + (ChatMessageType.human, '{question}'), +]); + +// 4. 
Define the final chain +final model = ChatOpenAI(apiKey: openaiApiKey); +const outputParser = StringOutputParser(); +final chain = setupAndRetrieval + .pipe(promptTemplate) + .pipe(model) + .pipe(outputParser); + +// 5. Run the pipeline +final res = await chain.invoke('Who created LangChain.dart?'); +print(res); +// David created LangChain.dart +``` + +## Documentation + +- [LangChain.dart documentation](https://langchaindart.dev) +- [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) +- [LangChain.dart blog](https://blog.langchaindart.dev) +- [Project board](https://github.com/users/davidmigloz/projects/2/views/1) + +## Community + +Stay up-to-date on the latest news and updates on the field, have great discussions, and get help in the official [LangChain.dart Discord server](https://discord.gg/x4qbhqecVR). + +[![LangChain.dart Discord server](https://invidget.switchblade.xyz/x4qbhqecVR?theme=light)](https://discord.gg/x4qbhqecVR) + +## Contribute + +| 📢 **Call for Collaborators** 📢 | +|-------------------------------------------------------------------------| +| We are looking for collaborators to join the core group of maintainers. | + +New contributors welcome! Check out our [Contributors Guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md) for help getting started. + +Join us on [Discord](https://discord.gg/x4qbhqecVR) to meet other maintainers. We'll help you get your first contribution in no time! + +## Related projects + +- [LangChain](https://github.com/langchain-ai/langchain): The original Python LangChain project. +- [LangChain.js](https://github.com/langchain-ai/langchainjs): A JavaScript port of LangChain. +- [LangChain.go](https://github.com/tmc/langchaingo): A Go port of LangChain. +- [LangChain.rb](https://github.com/andreibondarev/langchainrb): A Ruby port of LangChain. + +## Sponsors + +

+ + + +

+ +## License + +LangChain.dart is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/docs_v2/docs/02-tutorials/01-llm_chain.md b/docs_v2/docs/02-tutorials/01-llm_chain.md new file mode 100644 index 00000000..e40bbb77 --- /dev/null +++ b/docs_v2/docs/02-tutorials/01-llm_chain.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + + +# Build a Simple LLM Application with LCEL \ No newline at end of file diff --git a/docs_v2/docs/02-tutorials/index.mdx b/docs_v2/docs/02-tutorials/index.mdx new file mode 100644 index 00000000..82e56f9e --- /dev/null +++ b/docs_v2/docs/02-tutorials/index.mdx @@ -0,0 +1,28 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- +# Tutorials + +New to LangChain or to LLM app development in general? Read this material to quickly get up and running. + +## Basics +- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain) +- [Build a Chatbot](/docs/tutorials/chatbot) +- [Build vector stores and retrievers](/docs/tutorials/retrievers) +- [Build an Agent](/docs/tutorials/agents) + +## Working with external knowledge +- [Build a Retrieval Augmented Generation (RAG) Application](/docs/tutorials/rag) +- [Build a Conversational RAG Application](/docs/tutorials/qa_chat_history) +- [Build a Question/Answering system over SQL data](/docs/tutorials/sql_qa) +- [Build a Query Analysis System](/docs/tutorials/query_analysis) +- [Build a local RAG application](/docs/tutorials/local_rag) +- [Build a Question Answering application over a Graph Database](/docs/tutorials/graph) +- [Build a PDF ingestion and Question/Answering system](/docs/tutorials/pdf_qa/) + +## Specialized tasks +- [Build an Extraction Chain](/docs/tutorials/extraction) +- [Generate synthetic data](/docs/tutorials/data_generation) +- [Classify text into labels](/docs/tutorials/classification) +- [Summarize text](/docs/tutorials/summarization) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/01-installation.md b/docs_v2/docs/03-how_to/01-installation.md new file mode 100644 index 00000000..44604573 --- /dev/null +++ b/docs_v2/docs/03-how_to/01-installation.md @@ -0,0 +1,77 @@ +# Installation +Langchain as a framework consists of a number of packages. They're split into different packages allowing you to choose exactly what pieces of the framework to install and use. + +## Installing essential Langchain.dart packages + +### [`langchain`](https://pub.dev/packages/langchain) +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with Langchain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +```bash +dart pub add langchain +``` + +### [`langchain_core`](https://pub.dev/packages/langchain_core) +This package contains base abstractions of different components and ways to compose them together. +The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. +> Depend on this package to build frameworks on top of Langchain.dart or to interoperate with it. + +To install this package in your Dart or Flutter project +```bash +dart pub add langchain_core +``` + +### [`langchain_community`](https://pub.dev/packages/langchain_community) +Contains third-party integrations and community-contributed components that are not part of the core Langchain.dart API. 
+> Depend on this package if you want to use any of the integrations or components it provides like CSV,JSON,Text or HTML loaders and more. + +```bash +dart pub add langchain langchain_community +``` + +## Integration packages +Certain integrations like OpenAI and Anthropic have their own packages. Any integrations that require their own package will be documented as such in the Integration docs. + + +Let's say you're using [OpenAI](https://platform.openai.com/), install the `langchain_openai` package. +```bash +dart pub add langchain langchain_community langchain_openai +``` + +Let's say you want Google integration to use (GoogleAI, VertexAI, Gemini etc), install the `langchain_google` package. +```bash +dart pub add langchain langchain_community langchain_google +``` +The following table contains the list of existing Langchain.dart integration packages. + +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) 
| +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | +## Documentation + +Detailed documentation for various integrations can be found in the `/docs/05-integration/` directory: + +- [Anthropic](/docs/integrations/anthropic) +- [Anyscale](/docs/integrations/anyscale) +- [Firebase VertexAI](/docs/integrations/firebase_vertex_ai) +- [GCP VertexAI](/docs/integrations/gcp_vertex_ai) +- [GoogleAI](/docs/integrations/googleai) +- [MistralAI](/docs/integrations/mistralai) +- [Ollama](/docs/integrations/ollama) +- [OpenRouter](/docs/integrations/open_router) +- [OpenAI](/docs/integrations/openai) +- [PrEM](/docs/integrations/prem) +- [TogetherAI](/docs/integrations/together_ai) \ No newline at end of file diff --git a/docs_v2/docs/03-how_to/02-structured_output.md b/docs_v2/docs/03-how_to/02-structured_output.md new file mode 100644 index 00000000..95983cd6 --- /dev/null +++ b/docs_v2/docs/03-how_to/02-structured_output.md @@ -0,0 +1,14 @@ +--- +sidebar_position: 3 +keywords: [structured output, json, information extraction, with_structured_output] +--- +# How to return structured data from a model + +> This guide assumes familiarity with the following concepts: +> - [Chat models](/docs/concepts/#chat-models) +> - [Function/tool calling](/docs/concepts/#functiontool-calling) + + +It is often useful to have a model return output that matches a specific schema. One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model. + + diff --git a/docs_v2/docs/03-how_to/index.mdx b/docs_v2/docs/03-how_to/index.mdx new file mode 100644 index 00000000..81ea6bc7 --- /dev/null +++ b/docs_v2/docs/03-how_to/index.mdx @@ -0,0 +1,149 @@ +--- +sidebar_position: 0 +sidebar_class_name: hidden +--- + +# How-to guides + +Here you'll find answers to "How do I...?" types of questions. +These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task. +For conceptual explanations see the [Conceptual guide](/docs/concepts/). +For end-to-end walkthroughs see [Tutorials](/docs/tutorials). +For comprehensive descriptions of every class and function see the [API Reference](https://pub.dev/documentation/langchain/latest/index.html). + + +## Installation + +- [How to: install LangChain packages](/docs/how_to/installation/) + +## Key features +This highlights functionality that is core to using LangChain. 
+ +- [How to: return structured data from a model](/docs/how_to/structured_output/) +- [How to: use a model to call tools](/docs/how_to/tool_calling) +- [How to: stream runnables](/docs/how_to/streaming) +- [How to: debug your LLM apps](/docs/how_to/debugging/) + +[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol. + +[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives. + +- [How to: chain runnables](/docs/how_to/sequence) +- [How to: stream runnables](/docs/how_to/streaming) +- [How to: invoke runnables in parallel](/docs/how_to/parallel/) +- [How to: add default invocation args to runnables](/docs/how_to/binding/) +- [How to: turn any function into a runnable](/docs/how_to/functions) +- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough) +- [How to: configure runnable behavior at runtime](/docs/how_to/configure) +- [How to: add message history (memory) to a chain](/docs/how_to/message_history) +- [How to: route between sub-chains](/docs/how_to/routing) +- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/) +- [How to: inspect runnables](/docs/how_to/inspect) +- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks) +- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains) +- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets) + + +## Components + +These are the core building blocks you can use when building applications. + +### Prompt templates + +[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model. + +- [How to: use few shot examples](/docs/how_to/few_shot_examples) +- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/) +- [How to: partially format prompt templates](/docs/how_to/prompts_partial) +- [How to: compose prompts together](/docs/how_to/prompts_composition) + +### Chat models + +[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message. 
+ +- [How to: do function/tool calling](/docs/how_to/tool_calling) +- [How to: get models to return structured output](/docs/how_to/structured_output) +- [How to: cache model responses](/docs/how_to/chat_model_caching) +- [How to: get log probabilities](/docs/how_to/logprobs) +- [How to: create a custom chat model class](/docs/how_to/custom_chat_model) +- [How to: stream a response back](/docs/how_to/chat_streaming) +- [How to: track token usage](/docs/how_to/chat_token_usage_tracking) +- [How to: track response metadata across providers](/docs/how_to/response_metadata) +- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/) +- [How to: use chat model to call tools](/docs/how_to/tool_calling) +- [How to: stream tool calls](/docs/how_to/tool_streaming) +- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot) +- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific) +- [How to: force a specific tool call](/docs/how_to/tool_choice) +- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/) + +### LLMs + +What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string. + +- [How to: cache model responses](/docs/how_to/llm_caching) +- [How to: create a custom LLM class](/docs/how_to/custom_llm) +- [How to: stream a response back](/docs/how_to/streaming_llm) +- [How to: track token usage](/docs/how_to/llm_token_usage_tracking) +- [How to: work with local LLMs](/docs/how_to/local_llms) + +### Document loaders + +[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources. + +- [How to: load CSV data](/docs/how_to/document_loader_csv) +- [How to: load data from a directory](/docs/how_to/document_loader_directory) +- [How to: load HTML data](/docs/how_to/document_loader_html) +- [How to: load JSON data](/docs/how_to/document_loader_json) +- [How to: load Markdown data](/docs/how_to/document_loader_markdown) +- [How to: load Microsoft Office data](/docs/how_to/document_loader_office_file) +- [How to: load PDF files](/docs/how_to/document_loader_pdf) +- [How to: write a custom document loader](/docs/how_to/document_loader_custom) + +### Text splitters + +[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval. + +- [How to: recursively split text](/docs/how_to/recursive_text_splitter) +- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter) +- [How to: split by HTML sections](/docs/how_to/HTML_section_aware_splitter) +- [How to: split by character](/docs/how_to/character_text_splitter) +- [How to: split code](/docs/how_to/code_splitter) +- [How to: split Markdown by headers](/docs/how_to/markdown_header_metadata_splitter) +- [How to: recursively split JSON](/docs/how_to/recursive_json_splitter) +- [How to: split text into semantic chunks](/docs/how_to/semantic-chunker) +- [How to: split by tokens](/docs/how_to/split_by_token) + +### Vector stores + +[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings. + +- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores) + +### Retrievers + +[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents. 
+
+- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
+- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
+- [How to: use contextual compression to compress the data retrieved](/docs/how_to/contextual_compression)
+- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
+- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
+- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever)
+- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder)
+- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector)
+- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
+- [How to: generate metadata filters](/docs/how_to/self_query)
+- [How to: create a time-weighted retriever](/docs/how_to/time_weighted_vectorstore)
+- [How to: use hybrid vector and keyword retrieval](/docs/how_to/hybrid)
+
+### Agents
+
+:::note
+
+For in-depth how-to guides for agents, please check out the [LangGraph](https://langchain-ai.github.io/langgraph/) documentation.
+
+:::
+
+- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
+- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
\ No newline at end of file
diff --git a/docs_v2/docs/04-concepts.mdx b/docs_v2/docs/04-concepts.mdx
new file mode 100644
index 00000000..fcd2335b
--- /dev/null
+++ b/docs_v2/docs/04-concepts.mdx
@@ -0,0 +1,468 @@
+# Conceptual guide
+
+This section contains introductions to key parts of LangChain.dart.
+
+## Architecture
+
+LangChain.dart as a framework consists of a number of packages.
+
+### [`langchain_core`](https://pub.dev/packages/langchain_core)
+This package contains base abstractions of different components and ways to compose them together.
+The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
+No third-party integrations are defined here.
+
+> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it.
+
+### [`langchain`](https://pub.dev/packages/langchain)
+
+Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture.
+
+> Depend on this package to build LLM applications with LangChain.dart.
+>
+> This package exposes `langchain_core` so you don't need to depend on it explicitly.
+
+### [`langchain_community`](https://pub.dev/packages/langchain_community)
+
+Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API.
+
+> Depend on this package if you want to use any of the integrations or components it provides.
+
+### Integration-specific packages
+
+Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package.
+
+> Depend on an integration-specific package if you want to use the specific integration.
+
+See [Integrations](/docs/integrations) to integrate with a specific package.
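
To make the package split concrete, here is a rough sketch of what a typical application ends up importing; the exact set of integration packages (here `langchain_openai` is just an assumed example) depends on the providers you use:

```dart
// The umbrella package re-exports `langchain_core`, so the core abstractions
// (prompts, runnables, output parsers, etc.) come in with a single import.
import 'package:langchain/langchain.dart';
// Integration-specific package (assumed example): OpenAI models and embeddings.
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final promptTemplate = PromptTemplate.fromTemplate(
    'Tell me a joke about {topic}',
  );
  final model = ChatOpenAI(apiKey: 'your-openai-api-key');
  final chain = promptTemplate.pipe(model).pipe(const StringOutputParser());
  print(await chain.invoke({'topic': 'llamas'}));
}
```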
+ +## LangChain Expression Language (LCEL) + +LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL: + +- **First-class streaming support:** When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens. +- **Optimized concurrent execution:** Whenever your LCEL chains have steps that can be executed concurrently (eg if you fetch documents from multiple retrievers) we automatically do it for the smallest possible latency. +- **Retries and fallbacks:** Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. +- **Access intermediate results:** For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain. + +### Runnable interface + +To make it as easy as possible to create custom chains, LangChain provides a `Runnable` interface that most components implement, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. + +This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. The standard interface includes: + +- `invoke`: call the chain on an input and return the output. +- `stream`: call the chain on an input and stream the output. +- `batch`: call the chain on a list of inputs and return a list of outputs. + +The type of the input and output varies by component: + +| Component | Input Type | Output Type | +|-----------------------------|------------------------|------------------------| +| `PromptTemplate` | `Map` | `PromptValue` | +| `ChatMessagePromptTemplate` | `Map` | `PromptValue` | +| `LLM` | `PromptValue` | `LLMResult` | +| `ChatModel` | `PromptValue` | `ChatResult` | +| `OutputParser` | Any object | Parser output type | +| `Retriever` | `String` | `List` | +| `DocumentTransformer` | `List` | `List` | +| `Tool` | `Map` | `String` | +| `Chain` | `Map` | `Map` | + +## Components + +### Chat models +Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text). +These are traditionally newer models (older models are generally `LLMs`, see below). +Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages. + +Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs. + +When a string is passed in as input, it is converted to a `HumanMessage` and then passed to the underlying model. + +LangChain does not host any Chat Models, rather we rely on third party integrations. 
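
As a rough sketch (using `ChatOpenAI` purely as an assumed example integration, with a placeholder API key), both input styles look like this:

```dart
final chatModel = ChatOpenAI(apiKey: 'your-openai-api-key');

// Messages in, message out:
final res = await chatModel.invoke(
  PromptValue.chat([
    ChatMessage.system('You are a helpful assistant.'),
    ChatMessage.humanText('What is the capital of France?'),
  ]),
);
print(res.output.content); // e.g. 'The capital of France is Paris.'

// A plain string also works: it is wrapped in a human message under the hood.
final res2 = await chatModel.invoke(
  PromptValue.string('What is the capital of France?'),
);
print(res2.output.content);
```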
+ +We have some standardized parameters when constructing ChatModels: +- `model`: the name of the model +- `temperature`: the sampling temperature +- `timeout`: request timeout +- `maxTokens`: max tokens to generate +- `apiKey`: API key for the model provider +- `baseUrl`: endpoint to send requests to + +Some important things to note: +- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max_tokens can't be supported on these. +- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in ``langchain-community``. + +ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the API reference for that model. + +### LLMs +:::caution +Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models), +even for non-chat use cases. + +You are probably looking for [the section above instead](/docs/concepts/#chat-models). +::: + +Language models that takes a string as input and returns a string. +These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above). + +Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. +This gives them the same interface as [Chat Models](/docs/concepts/#chat-models). +When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model. + +LangChain.dart does not host any LLMs, rather we rely on third party integrations. See (/docs/integrations) + + + +### Messages +Some language models take a list of messages as input and return a message. + +LangChain provides several objects to easily distinguish between different roles: +#### HumanChatMessage +This represents a message from the user. + +#### AIChatMessage +This represents a message from the model. + +#### SystemChatMessage +This represents a system message, which tells the model how to behave. Not every model provider supports this. + +#### FunctionChatMessage / ToolChatMessage +These represent a decision from an language model to call a tool. They're a subclass of a AIChatMessage. FunctionChatMessage is a legacy message type corresponding to OpenAI's legacy function-calling API. + +### Prompt Templates + +Most LLM applications do not pass user input directly into an `LLM`. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand. + +In the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it would be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions. + +`PromptTemplates` help with exactly this! They bundle up all the logic for going from user input into a fully formatted prompt. 
This can start off very simple - for example, a prompt to produce the above string would just be: + +```dart +final prompt = PromptTemplate.fromTemplate( + 'What is a good name for a company that makes {product}?', +); +final res = prompt.format({'product': 'colorful socks'}); +print(res); +// 'What is a good name for a company that makes colorful socks?' +``` + +However, the advantages of using these over raw string formatting are several. You can "partial" out variables - e.g. you can format only some of the variables at a time. You can compose them together, easily combining different templates into a single prompt. + +For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). + +`PromptTemplates` can also be used to produce a list of messages. In this case, the prompt not only contains information about the content, but also each message (its role, its position in the list, etc) Here, what happens most often is a `ChatPromptTemplate` is a list of `ChatMessagePromptTemplates`. Each `ChatMessagePromptTemplate` contains instructions for how to format that `ChatMessage` - its role, and then also its content. Let's take a look at this below: + +```dart +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +const humanTemplate = '{text}'; + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, template), + (ChatMessageType.human, humanTemplate), +]); + +final res = chatPrompt.formatMessages({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// [ +// SystemChatMessage(content='You are a helpful assistant that translates English to French.'), +// HumanChatMessage(content='I love programming.') +// ] +``` + +`ChatPromptTemplates` can also be constructed in other ways - For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). + +### Output parsers + + +:::note + +The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. +More and more models are supporting function (or tool) calling, which handles this automatically. +It is recommended to use function/tool calling rather than output parsing. +See documentation for that [here](/docs/concepts/#function-tool-calling). + +::: +`OutputParsers` convert the raw output of an LLM into a format that can be used downstream. There are few main type of `OutputParsers`, including: + +- Convert text from LLM -> structured information (e.g. JSON). +- Convert a `ChatMessage` into just a string. +- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string. + +For full information on this, see the section on [output parsers](/docs/how_to/#output-parsers). + +### Chat history +Most LLM applications have a conversational interface. An essential component of a conversation is being able to refer to information introduced earlier in the conversation. At bare minimum, a conversational system should be able to access some window of past messages directly. + +The concept of ChatHistory refers to a class in LangChain which can be used to wrap an arbitrary chain. This ChatHistory will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database. 
Future interactions will then load those messages and pass them into the chain as part of the input.
+
+### Documents
+A Document object in LangChain contains information about some data. It has two attributes:
+- pageContent: `String` - The content of this document.
+- metadata: `Map` - Arbitrary metadata associated with this document. Can track the document id, file name, etc.
+
+### Document loaders
+Use document loaders to load data from a source as `Document`s. For example, there are document loaders
+for loading a simple .txt file, for loading the text contents of any web page,
+or even for loading a transcript of a YouTube video.
+
+Document loaders expose two methods:
+
+- `lazyLoad()`: returns a `Stream` of `Document`s. This is useful for loading
+  large amounts of data, as it allows you to process each `Document` as it is
+  loaded, rather than waiting for the entire data set to be loaded in memory.
+- `load()`: returns a list of `Document`s. Under the hood, this method calls
+  `lazyLoad()` and collects the results into a list. Use this method only with
+  small data sets.
+
+The simplest loader reads in a file as text and places it all into one
+`Document`.
+
+```dart
+const filePath = 'example.txt';
+const loader = TextLoader(filePath);
+final docs = await loader.load();
+```
+
+### Text splitters
+Once you've loaded documents, you'll often want to transform them to better suit
+your application. The simplest example is that you may want to split a long document
+into smaller chunks that can fit into your model's context window. LangChain has
+a number of built-in document transformers that make it easy to split, combine,
+filter, and otherwise manipulate documents.
+
+When you want to deal with long pieces of text, it is necessary to split up that
+text into chunks. As simple as this sounds, there is a lot of potential
+complexity here. Ideally, you want to keep the semantically related pieces of
+text together. What "semantically related" means could depend on the type of
+text. This section showcases several ways to do that.
+
+At a high level, text splitters work as follows:
+
+1. Split the text up into small, semantically meaningful chunks (often
+   sentences).
+2. Start combining these small chunks into a larger chunk until you reach a
+   certain size (as measured by some function).
+3. Once you reach that size, make that chunk its own piece of text and then
+   start creating a new chunk of text with some overlap (to keep context between
+   chunks).
+
+That means there are two different axes along which you can customize your text
+splitter:
+
+1. How the text is split.
+2. How the chunk size is measured.
+
+The most basic text splitter is the `CharacterTextSplitter`. This splits based
+on characters (by default `\n\n`) and measures chunk length by number of
+characters.
+
+The default recommended text splitter is the `RecursiveCharacterTextSplitter`. This text splitter
+takes a list of characters. It tries to create chunks based on splitting on the first character,
+but if any chunks are too large it then moves onto the next character, and so forth. By default
+the characters it tries to split on are `["\n\n", "\n", " ", ""]`.
+
+In addition to controlling which characters you can split on, you can also
+control a few other things:
+
+- `lengthFunction`: how the length of chunks is calculated. Defaults to just
+  counting number of characters, but it's pretty common to pass a token counter
+  here. 
+- `chunkSize`: the maximum size of your chunks (as measured by the length + function). +- `chunkOverlap`: the maximum overlap between chunks. It can be nice to have + some overlap to maintain some continuity between chunks (eg do a sliding + window). +- `addStartIndex`: whether to include the starting position of each chunk within + the original document in the metadata. + +```dart +const filePath = 'state_of_the_union.txt'; +const loader = TextLoader(filePath); +final documents = await loader.load(); +const textSplitter = RecursiveCharacterTextSplitter( + chunkSize: 800, + chunkOverlap: 0, +); +final docs = textSplitter.splitDocuments(documents); +``` + +### Embedding models +Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text. By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning. These natural language search capabilities underpin many of the context retrieval where we provide an LLM with relevant data it needs to effectively respond to a query. + +The Embeddings class is a class designed for interfacing with text embedding +models. There are lots of embedding model providers (OpenAI, Cohere, Hugging +Face, etc) - this class is designed to provide a standard interface for all of +them. + +Embeddings create a vector representation of a piece of text. This is useful +because it means we can think about text in the vector space, and do things like +semantic search where we look for pieces of text that are most similar in the +vector space. + +The base Embeddings class in LangChain exposes two methods: one for embedding +documents and one for embedding a query. The former takes as input multiple +texts, while the latter takes a single text. The reason for having these as two +separate methods is that some embedding providers have different embedding +methods for documents (to be searched over) vs queries (the search query +itself). + +For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models). + +### Vector stores +One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search for you. + +Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before +similarity search, allowing you more control over returned documents. + +Vector stores can be converted to the retriever interface by doing: + +For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores). + +### Retrievers +A retriever is an interface that returns documents given an unstructured query. +It is more general than a vector store. A retriever does not need to be able to +store documents, only to return (or retrieve) it. Vector stores can be used as +the backbone of a retriever, but there are other types of retrievers as well. + +Retrievers accept a string query as input and return a list of Document's as output. 
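+
+For example, a vector store can be converted into a retriever and queried directly. Below is a minimal sketch, assuming a `MemoryVectorStore` populated with a couple of illustrative documents, `OpenAIEmbeddings` as the embedding model, and an `openaiApiKey` variable holding your API key:
+
+```dart
+// Embed and store a few sample documents in an in-memory vector store.
+final vectorStore = MemoryVectorStore(
+  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),
+);
+await vectorStore.addDocuments(
+  documents: [
+    Document(pageContent: 'LangChain was created by Harrison'),
+    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),
+  ],
+);
+
+// Expose the vector store as a retriever and fetch the most relevant documents.
+final retriever = vectorStore.asRetriever();
+final docs = await retriever.getRelevantDocuments('Who created LangChain.dart?');
+print(docs.first.pageContent);
+// -> 'David ported LangChain to Dart in LangChain.dart'
+```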
+
+The public API of the `BaseRetriever` class in LangChain is as follows:
+
+```dart
+abstract interface class BaseRetriever {
+  Future<List<Document>> getRelevantDocuments(final String query);
+}
+```
+
+For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
+
+### Tools
+Tools are utilities designed to be called by a model. Their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
+A tool consists of:
+
+1. The name of the tool.
+2. A description of what the tool does.
+3. A JSON schema defining the inputs to the tool.
+4. A function (and, optionally, an async variant of the function).
+
+When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
+Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
+
+To define a tool in Dart, we use the `ToolSpec` class:
+```dart
+final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
+final model = ChatOpenAI(apiKey: openaiApiKey);
+
+final promptTemplate = ChatPromptTemplate.fromTemplate(
+  'Tell me a joke about {foo}',
+);
+
+const tool = ToolSpec(
+  name: 'joke',
+  description: 'A joke',
+  inputJsonSchema: {
+    'type': 'object',
+    'properties': {
+      'setup': {
+        'type': 'string',
+        'description': 'The setup for the joke',
+      },
+      'punchline': {
+        'type': 'string',
+        'description': 'The punchline for the joke',
+      },
+    },
+    'required': ['setup', 'punchline'],
+  },
+);
+
+final chain = promptTemplate |
+    model.bind(
+      ChatOpenAIOptions(
+        tools: const [tool],
+        toolChoice: ChatToolChoice.forced(name: tool.name),
+      ),
+    );
+
+final res = await chain.invoke({'foo': 'bears'});
+print(res);
+// ChatResult{
+//   id: chatcmpl-9LBPyaZcFMgjmOvkD0JJKAyA4Cihb,
+//   output: AIChatMessage{
+//     content: ,
+//     toolCalls: [
+//       AIChatMessageToolCall{
+//         id: call_JIhyfu6jdIXaDHfYzbBwCKdb,
+//         name: joke,
+//         argumentsRaw: {"setup":"Why don't bears like fast food?","punchline":"Because they can't catch it!"},
+//         arguments: {
+//           setup: Why don't bears like fast food?,
+//           punchline: Because they can't catch it!
+//         },
+//       }
+//     ],
+//   },
+//   finishReason: FinishReason.stop,
+//   metadata: {
+//     model: gpt-4o-mini,
+//     created: 1714835806,
+//     system_fingerprint: fp_3b956da36b
+//   },
+//   usage: LanguageModelUsage{
+//     promptTokens: 77,
+//     responseTokens: 24,
+//     totalTokens: 101
+//   },
+//   streaming: false
+// }
+```
+
+When designing tools to be used by a model, it is important to keep in mind that:
+
+- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
+- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
+- Simple, narrowly scoped tools are easier for models to use than complex tools.
+
+#### Related
+
+For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).
+
+To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
+
+### Agents
+By themselves, language models can't take actions - they just output text.
+A big use case for LangChain is creating agents. Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be. 
+The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish. + +### Callbacks +TODO: + + +### Techniques + +#### Streaming + +#### Function/tool calling + +#### Structured Output +LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide range of inputs, but for some use-cases, it can be useful to constrain the LLM's output to a specific format or structure. This is referred to as structured output. + + +#### Few-shot prompting + +#### Retrieval + +#### Text splitting + +#### Evaluation + +#### Tracing +##### \ No newline at end of file diff --git a/docs_v2/docs/05-integrations/anthropic.md b/docs_v2/docs/05-integrations/anthropic.md new file mode 100644 index 00000000..b607ddc7 --- /dev/null +++ b/docs_v2/docs/05-integrations/anthropic.md @@ -0,0 +1,145 @@ +# ChatAnthropic + +Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API). + +## Setup + +The Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests. + +The following models are available: +- `claude-3-5-sonnet-20240620` +- `claude-3-haiku-20240307` +- `claude-3-opus-20240229` +- `claude-3-sonnet-20240229` +- `claude-2.0` +- `claude-2.1` + +Mind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); + +print(res.output.content); +// -> 'The fruit in the image is an apple.' 
+``` + +## Streaming + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 123 +// 456789101 +// 112131415161 +// 718192021222 +// 324252627282 +// 930 +``` + +## Tool calling + +`ChatAnthropic` supports tool calling. + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + tools: [tool], + ), +); + +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs_v2/docs/05-integrations/anyscale.md b/docs_v2/docs/05-integrations/anyscale.md new file mode 100644 index 00000000..9a71c30e --- /dev/null +++ b/docs_v2/docs/05-integrations/anyscale.md @@ -0,0 +1,84 @@ +# Anyscale + +[Anyscale](https://www.anyscale.com/) offers a unified OpenAI-compatible API for a broad range of [models](https://docs.endpoints.anyscale.com/guides/models/#chat-models) running serverless or on your own dedicated instances. + +It also allows to fine-tune models on your own data or train new models from scratch. + +You can consume Anyscale API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API. 
+ +The only difference is that you need to change the base URL to `https://api.endpoints.anyscale.com/v1`: + +```dart +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'meta-llama/Llama-2-70b-chat-hf', + ), +); +``` + +## Invoke + +```dart +final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.', + ), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'meta-llama/Llama-2-70b-chat-hf', + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> "I love programming" se traduit en français sous la forme "J'aime passionnément la programmation" +``` + +## Stream + +```dart +final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatOpenAI( + apiKey: anyscaleApiKey, + baseUrl: 'https://api.endpoints.anyscale.com/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 1 +// 2 +// 3 +// ... +// 9 +``` diff --git a/docs_v2/docs/05-integrations/firebase_vertex_ai.md b/docs_v2/docs/05-integrations/firebase_vertex_ai.md new file mode 100644 index 00000000..cd33daa2 --- /dev/null +++ b/docs_v2/docs/05-integrations/firebase_vertex_ai.md @@ -0,0 +1,190 @@ +# Vertex AI for Firebase + +The [Vertex AI Gemini API](https://firebase.google.com/docs/vertex-ai) gives you access to the latest generative AI models from Google: the Gemini models. If you need to call the Vertex AI Gemini API directly from your mobile or web app you can use the `ChatFirebaseVertexAI` class instead of the [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) class which is designed to be used on the server-side. + +`ChatFirebaseVertexAI` is built specifically for use with mobile and web apps, offering security options against unauthorized clients as well as integrations with other Firebase services. + +## Key capabilities + +- **Multimodal input**: The Gemini models are multimodal, so prompts sent to the Gemini API can include text, images (even PDFs), video, and audio. +- **Growing suite of capabilities**: You can call the Gemini API directly from your mobile or web app, build an AI chat experience, use function calling, and more. +- **Security for production apps**: Use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. 
+- **Robust infrastructure**: Take advantage of scalable infrastructure that's built for use with mobile and web apps, like managing structured data with Firebase database offerings (like Cloud Firestore) and dynamically setting run-time configurations with Firebase Remote Config. + +## Setup + +### 1. Set up a Firebase project + +Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/get-started?platform=flutter) for the latest information on how to set up the Vertex AI for Firebase in your Firebase project. + +In summary, you need to: +1. Upgrade your billing plan to the Blaze pay-as-you-go pricing plan. +2. Enable the required APIs (`aiplatform.googleapis.com` and `firebaseml.googleapis.com`). +3. Integrate the Firebase SDK into your app (if you haven't already). +4. Recommended: Enable Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. + +### 2. Add the LangChain.dart Google package + +Add the `langchain_google` package to your `pubspec.yaml` file. + +```yaml +dependencies: + langchain: {version} + langchain_google: {version} +``` + +Internally, `langchain_google` uses the [`firebase_vertexai`](https://pub.dev/packages/firebase_vertexai) SDK to interact with the Vertex AI for Firebase API. + +### 3. Initialize your Firebase app + +```yaml +await Firebase.initializeApp(); +``` + +### 4. Call the Vertex AI Gemini API + +```dart +final chatModel = ChatFirebaseVertexAI(); +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +> Check out the [sample project](https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase/example) to see a complete project using Vertex AI for Firebase. + +## Available models + +The following models are available: +- `gemini-1.5-flash`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.5-pro`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.0-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 + +Mind that this list may not be up-to-date. Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) for the updated list. + +## Multimodal support + +```dart +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + ), +); +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); +print(res.output.content); +// -> 'That is an apple.' 
+``` + +## Streaming + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 1 +// 2345678910111213 +// 1415161718192021 +// 222324252627282930 +``` + +## Tool calling + +`ChatGoogleGenerativeAI` supports tool calling. + +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatFirebaseVertexAI( + defaultOptions: ChatFirebaseVertexAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` + +## Prevent abuse with Firebase App Check + +You can use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/app-check) for more information. + +## Locations + +When initializing the Vertex AI service, you can optionally specify a location in which to run the service and access a model. If you don't specify a location, the default is us-central1. See the list of [available locations](https://firebase.google.com/docs/vertex-ai/locations?platform=flutter#available-locations). + +```dart +final chatModel = ChatFirebaseVertexAI( + location: 'us-central1', +); +``` + +## Alternatives + +- [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md): Use this class to call the Vertex AI Gemini API from the server-side. +- [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md): Use this class to call the "Google AI" version of the Gemini API that provides free-of-charge access (within limits and where available). This API is not intended for production use but for experimentation and prototyping. After you're familiar with how a Gemini API works, migrate to the Vertex AI for Firebase, which have many additional features important for mobile and web apps, like protecting the API from abuse using Firebase App Check. diff --git a/docs_v2/docs/05-integrations/gcp_vertex_ai.md b/docs_v2/docs/05-integrations/gcp_vertex_ai.md new file mode 100644 index 00000000..5417aab5 --- /dev/null +++ b/docs_v2/docs/05-integrations/gcp_vertex_ai.md @@ -0,0 +1,116 @@ +# GCP Chat Vertex AI + +Wrapper around [GCP Vertex AI chat models](https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts) API (aka PaLM API for chat). + +## Set up your Google Cloud Platform project + +1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). +2. 
[Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). +3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com). +4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations). + +### Authentication + +To create an instance of `ChatVertexAI` you need to provide an HTTP client that handles authentication. The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package. + +To create an instance of `VertexAI` you need to provide an [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) instance. + +There are several ways to obtain an `AuthClient` depending on your use case. Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details. + +Example using a service account JSON: + +```dart +final serviceAccountCredentials = ServiceAccountCredentials.fromJson( + json.decode(serviceAccountJson), +); +final authClient = await clientViaServiceAccount( + serviceAccountCredentials, + [ChatVertexAI.cloudPlatformScope], +); +final chatVertexAi = ChatVertexAI( + httpClient: authClient, + project: 'your-project-id', +); +``` + +The service account should have the following [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions): +- `aiplatform.endpoints.predict` + +The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes) is: +- `https://www.googleapis.com/auth/cloud-platform` (you can use the constant `ChatVertexAI.cloudPlatformScope`) + +See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control + +### Available models + +- `chat-bison` + * Max input token: 4096 + * Max output tokens: 1024 + * Training data: Up to Feb 2023 + * Max turns: 2500 +- `chat-bison-32k` + * Max input and output tokens combined: 32k + * Training data: Up to Aug 2023 + * Max turns: 2500 + +The previous list of models may not be exhaustive or up-to-date. Check out the [Vertex AI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models) for the latest list of available models. + +### Model options + +You can define default options to use when calling the model (e.g. temperature, stop sequences, etc. ) using the `defaultOptions` parameter. + +The default options can be overridden when calling the model using the `options` parameter. + +Example: +```dart +final chatModel = ChatVertexAI( + httpClient: authClient, + project: 'your-project-id', + defaultOptions: ChatVertexAIOptions( + temperature: 0.9, + ), +); +final result = await chatModel( + [ChatMessage.humanText('Hello')], + options: ChatVertexAIOptions( + temperature: 0.5, + ), +); +``` + +### Full example + +```dart +import 'package:langchain/langchain.dart'; +import 'package:langchain_google/langchain_google.dart'; + +void main() async { + final chat = ChatVertexAI( + httpClient: await _getAuthHttpClient(), + project: _getProjectId(), + defaultOptions: const ChatVertexAIOptions( + temperature: 0, + ), + ); + while (true) { + stdout.write('> '); + final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? 
''); + final aiMsg = await chat([usrMsg]); + print(aiMsg.content); + } +} + +Future _getAuthHttpClient() async { + final serviceAccountCredentials = ServiceAccountCredentials.fromJson( + json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!), + ); + return clientViaServiceAccount( + serviceAccountCredentials, + [VertexAI.cloudPlatformScope], + ); +} + +String _getProjectId() { + return Platform.environment['VERTEX_AI_PROJECT_ID']!; +} +``` diff --git a/docs_v2/docs/05-integrations/googleai.md b/docs_v2/docs/05-integrations/googleai.md new file mode 100644 index 00000000..033c7672 --- /dev/null +++ b/docs_v2/docs/05-integrations/googleai.md @@ -0,0 +1,149 @@ +# ChatGoogleGenerativeAI + +Wrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemini API). + +## Setup + +To use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey). + +The following models are available: +- `gemini-1.5-flash`: + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-1.5-pro`: text / image -> text model + * text / image / audio -> text model + * Max input token: 1048576 + * Max output tokens: 8192 +- `gemini-pro-vision`: + * text / image -> text model + * Max input token: 12288 + * Max output tokens: 4096 +- `gemini-1.0-pro` (or `gemini-pro`): + * text -> text model + * Max input token: 30720 + * Max output tokens: 2048 + +Mind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/models) for the updated list. + +## Usage + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); + +final chatPrompt = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, 'Text to translate:\n{text}'), +]); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'adore programmer.' +``` + +## Multimodal support + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); +final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), +); +print(res.output.content); +// -> 'That is an apple.' 
+``` + +## Streaming + +```dart +final apiKey = Platform.environment['GOOGLEAI_API_KEY']; + +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); + +final chatModel = ChatGoogleGenerativeAI( + apiKey: apiKey, + defaultOptions: const ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '30'}); +await stream.forEach(print); +// 1 +// 2345678910111213 +// 1415161718192021 +// 222324252627282930 +``` + +## Tool calling + +`ChatGoogleGenerativeAI` supports tool calling. + +Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools. + +Example: +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + }, + 'required': ['location'], + }, +); +final chatModel = ChatGoogleGenerativeAI( + defaultOptions: ChatGoogleGenerativeAIOptions( + model: 'gemini-1.5-pro', + temperature: 0, + tools: [tool], + ), +); +final res = await model.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +``` diff --git a/docs_v2/docs/05-integrations/index.mdx b/docs_v2/docs/05-integrations/index.mdx new file mode 100644 index 00000000..35f38dfc --- /dev/null +++ b/docs_v2/docs/05-integrations/index.mdx @@ -0,0 +1,56 @@ +--- +sidebar_position: 0 +index: auto +--- +# Integrations + +> If you'd like to write your own integration, see Extending Langchain. + +The following table contains the list of existing Langchain.dart integration packages. To install a specific integration, see [Installing Langchain components](/docs/03-how_to/01-installation.md) + + +

+ +

+ +| Package | Version | Description | +|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | +| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | +| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components and utilities | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4o, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | + +Functionality provided by each integration package: + +| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | +|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| +| [langchain_community](https://pub.dev/packages/langchain_community) | | | | ✔ | | | ✔ | +| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | +| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | + +The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: + +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| +| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API 
client | +| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | + diff --git a/docs_v2/docs/05-integrations/mistralai.md b/docs_v2/docs/05-integrations/mistralai.md new file mode 100644 index 00000000..14f21fb1 --- /dev/null +++ b/docs_v2/docs/05-integrations/mistralai.md @@ -0,0 +1,76 @@ +# ChatMistralAI + +Wrapper around [Mistral AI](https://mistral.ai/) Chat Completions API. + +Mistral AI brings the strongest open generative models to the developers, along with efficient ways to deploy and customise them for production. + +> Note: Mistral AI API is currently in closed beta. You can request access [here](https://console.mistral.ai). + +## Setup + +To use `ChatMistralAI` you need to have a Mistral AI account and an API key. You can get one [here](https://console.mistral.ai/users/). + +The following models are available at the moment: +- `mistral-tiny`: Mistral 7B Instruct v0.2 (a minor release of Mistral 7B Instruct). It only works in English and obtains 7.6 on MT-Bench. +- `mistral-small`: Mixtral 8x7B. It masters English/French/Italian/German/Spanish and code and obtains 8.3 on MT-Bench. +- `mistral-medium`: a prototype model, that is currently among the top serviced models available based on standard benchmarks. It masters English/French/Italian/German/Spanish and code and obtains a score of 8.6 on MT-Bench. + +## Usage + +```dart +final chatModel = ChatMistralAI( + apiKey: 'apiKey', + defaultOptions: ChatMistralAIOptions( + model: 'mistral-small', + temperature: 0, + ), +); + +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); +const humanTemplate = '{text}'; +final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); +final chatPrompt = ChatPromptTemplate.fromPromptMessages( + [systemMessagePrompt, humanMessagePrompt], +); + +final chain = chatPrompt | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'J'aime la programmation.' 
+``` + +## Streaming + +```dart +final promptTemplate = ChatPromptTemplate.fromPromptMessages([ + SystemChatMessagePromptTemplate.fromTemplate( + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + HumanChatMessagePromptTemplate.fromTemplate( + 'List the numbers from 1 to {max_num}', + ), +]); +final chat = ChatMistralAI( + apiKey: 'apiKey', + defaultOptions: ChatMistralAIOptions( + model: 'mistral-medium', + temperature: 0, + ), +); + +final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 12 +// 345 +// 67 +// 89 +``` diff --git a/docs_v2/docs/05-integrations/ollama.md b/docs_v2/docs/05-integrations/ollama.md new file mode 100644 index 00000000..e6cc5907 --- /dev/null +++ b/docs_v2/docs/05-integrations/ollama.md @@ -0,0 +1,462 @@ +# ChatOllama + +Wrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion. + +Ollama allows you to run open-source large language models, such as Llama 3.1 or Gemma 2, locally. + +Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. + +## Setup + +Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance: + +1. Download and install [Ollama](https://ollama.ai) +2. Fetch a model via `ollama pull ` + * e.g., for Llama 3: `ollama pull llama3.1` +3. Instantiate the `ChatOllama` class with the downloaded model. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + ), +); +``` + +For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library). + +### Ollama base URL + +By default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + baseUrl: 'https://your-remote-server-where-ollama-is-running.com', + model: 'llama3.1', + ), +); +``` + +## Usage + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'), + (ChatMessageType.human, '{text}'), +]); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); + +final chain = promptTemplate | chatModel | StringOutputParser(); + +final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', +}); +print(res); +// -> 'La traduction est : "J'aime le programming.' +``` + +### Streaming + +Ollama supports streaming the output as the model generates it. 
+ +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas'), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), +); +final chain = promptTemplate.pipe(chat).pipe(StringOutputParser()); + +final stream = chain.stream({'max_num': '9'}); +await stream.forEach(print); +// 123 +// 456 +// 789 +``` + +### Multimodal support + +Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava). + +You can provide several base64-encoded `png` or `jpeg` images. Images up to 100MB in size are supported. + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llava', + temperature: 0, + ), +); +final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), +); +final res = await chatModel.invoke(PromptValue.chat([prompt])); +print(res.output.content); +// -> 'An Apple' +``` + +### Tool calling + +`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.). + +**Notes:** +- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer. +- Streaming tool calls is not supported at the moment. +- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.1`](https://ollama.com/library/llama3.1) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)). +- At the moment, small models like `llama3.1` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.1:70b` or `llama3.1:405b`. + +```dart +const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + ), +); + +final res = await chatModel.invoke( + PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'), +); +print(res.output.toolCalls); +// [AIChatMessageToolCall{ +// id: a621064b-03b3-4ca6-8278-f37504901034, +// name: get_current_weather, +// arguments: {location: Boston, US}, +// }, +// AIChatMessageToolCall{ +// id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, +// name: get_current_weather, +// arguments: {location: Madrid, ES}, +// }] +``` + +As you can see, `ChatOllama` support calling multiple tools in a single request. + +If you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter: + +```dart +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'get_current_weather'), + ), +); +``` + +**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance). + +### JSON mode + +You can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data. + +```dart +final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'), + (ChatMessageType.human, '{question}'), +]); +final chat = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + format: OllamaResponseFormat.json, + ), +); + +final chain = Runnable.getMapFromInput('question') + .pipe(promptTemplate) + .pipe(chat) + .pipe(JsonOutputParser()); + +final res = await chain.invoke( + 'What is the population of Spain, The Netherlands, and France?', +); +print(res); +// {Spain: 46735727, The Netherlands: 17398435, France: 65273538} +``` + +## Examples + +### Answering questions with data from an external API + +Imagine you have an API that provides flight times between two cities: + +```dart +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); +} +``` + +Using the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question. 
+ +```dart +const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, +); + +final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + tools: [getFlightTimesTool], + ), +); + +final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), +]; + +// First API call: Send the query and function description to the model +final response = await chatModel.invoke(PromptValue.chat(messages)); + +messages.add(response.output); + +// Check if the model decided to use the provided function +if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; +} + +// Process function calls made by the model +for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, + ), + ); +} + +// Second API call: Get final response from the model +final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); +print(finalResponse.output.content); +// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. +``` + +### Extracting structured data with tools + +A useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage. + +```dart +const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, +); + +final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + options: ChatOllamaOptions( + model: 'llama3.1', + temperature: 0, + ), + tools: [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), +); + +final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + +final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + +final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' + 'Claudia has orange hair and Alex is blonde.', +); +final extractedData = res.first.arguments; +print(extractedData); +// { +// people: [ +// { +// name: Alex, +// height: 152, +// hair_color: blonde +// }, +// { +// name: Claudia, +// height: 183, +// hair_color: orange +// } +// ] +// } +``` + +### RAG (Retrieval-Augmented Generation) pipeline + +We can easily create a fully local RAG pipeline using `OllamaEmbeddings` and `ChatOllama`. + +```dart +// 1. Create a vector store and add documents to it +final vectorStore = MemoryVectorStore( + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), +); +await vectorStore.addDocuments( + documents: [ + Document(pageContent: 'LangChain was created by Harrison'), + Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'), + ], +); + +// 2. Construct a RAG prompt template +final promptTemplate = ChatPromptTemplate.fromTemplates([ + (ChatMessageType.system, 'Answer the question based on only the following context:\n{context}'), + (ChatMessageType.human, '{question}'), +]); + +// 3. Define the model to use and the vector store retriever +final chatModel = ChatOllama( + defaultOptions: ChatOllamaOptions(model: 'llama3.1'), +); +final retriever = vectorStore.asRetriever( + defaultOptions: VectorStoreRetrieverOptions( + searchType: VectorStoreSimilaritySearch(k: 1), + ), +); + +// 4. Create a Runnable that combines the retrieved documents into a single string +final docCombiner = Runnable.mapInput, String>((docs) { + return docs.map((final d) => d.pageContent).join('\n'); +}); + +// 4. Define the RAG pipeline +final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), +}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser()); + +// 5. Run the pipeline +final res = await chain.invoke('Who created LangChain.dart?'); +print(res); +// Based on the context provided, David created LangChain.dart. +``` diff --git a/docs_v2/docs/05-integrations/open_router.md b/docs_v2/docs/05-integrations/open_router.md new file mode 100644 index 00000000..c2d63555 --- /dev/null +++ b/docs_v2/docs/05-integrations/open_router.md @@ -0,0 +1,157 @@ +# OpenRouter + +[OpenRouter](https://openrouter.ai/) offers a unified OpenAI-compatible API for a broad range of [models](https://openrouter.ai/models). + +You can also let users pay for their own models via their [OAuth PKCE](https://openrouter.ai/docs#oauth) flow. + +You can consume OpenRouter API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API. + +The only difference is that you need to change the base URL to `https://openrouter.ai/api/v1`: + +```dart +final chatModel = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: const ChatOpenAIOptions( + model: 'mistralai/mistral-small', + ), +); +``` + +OpenRouter allows you to specify an optional `HTTP-Referer` header to identify your app and make it discoverable to users on openrouter.ai. You can also include an optional `X-Title` header to set or modify the title of your app. 
diff --git a/docs_v2/docs/05-integrations/open_router.md b/docs_v2/docs/05-integrations/open_router.md new file mode 100644 index 00000000..c2d63555 --- /dev/null +++ b/docs_v2/docs/05-integrations/open_router.md @@ -0,0 +1,157 @@ +# OpenRouter

[OpenRouter](https://openrouter.ai/) offers a unified OpenAI-compatible API for a broad range of [models](https://openrouter.ai/models).

You can also let users pay for their own models via their [OAuth PKCE](https://openrouter.ai/docs#oauth) flow.

You can consume the OpenRouter API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.

The only difference is that you need to change the base URL to `https://openrouter.ai/api/v1`:

```dart
final chatModel = ChatOpenAI(
  apiKey: openRouterApiKey,
  baseUrl: 'https://openrouter.ai/api/v1',
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/mistral-small',
  ),
);
```

OpenRouter allows you to specify an optional `HTTP-Referer` header to identify your app and make it discoverable to users on openrouter.ai. You can also include an optional `X-Title` header to set or modify the title of your app.

```dart
final chatModel = ChatOpenAI(
  apiKey: openRouterApiKey,
  baseUrl: 'https://openrouter.ai/api/v1',
  headers: {
    'HTTP-Referer': 'com.myapp',
    'X-Title': 'OpenRouterTest',
  },
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/mistral-small',
  ),
);
```

## Invoke

```dart
final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];

final promptTemplate = ChatPromptTemplate.fromTemplates(const [
  (
    ChatMessageType.system,
    'You are a helpful assistant that translates {input_language} to {output_language}.',
  ),
  (ChatMessageType.human, '{text}'),
]);

final chatModel = ChatOpenAI(
  apiKey: openRouterApiKey,
  baseUrl: 'https://openrouter.ai/api/v1',
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/mistral-small',
  ),
);

final chain = promptTemplate | chatModel | StringOutputParser();

final res = await chain.invoke({
  'input_language': 'English',
  'output_language': 'French',
  'text': 'I love programming.',
});
print(res);
// -> 'J'aime la programmation.'
```

## Stream

```dart
final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];

final promptTemplate = ChatPromptTemplate.fromTemplates(const [
  (
    ChatMessageType.system,
    'You are a helpful assistant that replies only with numbers '
    'in order without any spaces or commas',
  ),
  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
]);

final chatModel = ChatOpenAI(
  apiKey: openRouterApiKey,
  baseUrl: 'https://openrouter.ai/api/v1',
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/mistral-small',
  ),
);

final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());

final stream = chain.stream({'max_num': '9'});
await stream.forEach(print);
// 123
// 456789
```

## Tool calling

OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls).

Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools.

In the following example, we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser`, which tries to "auto-complete" the partial JSON from each chunk into a valid state.

```dart
final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];
const tool = ToolSpec(
  name: 'joke',
  description: 'A joke',
  inputJsonSchema: {
    'type': 'object',
    'properties': {
      'setup': {
        'type': 'string',
        'description': 'The setup for the joke',
      },
      'punchline': {
        'type': 'string',
        'description': 'The punchline to the joke',
      },
    },
    'required': ['setup', 'punchline'],
  },
);
final promptTemplate = ChatPromptTemplate.fromTemplate(
  'tell me a long joke about {foo}',
);
final chat = ChatOpenAI(
  apiKey: openRouterApiKey,
  baseUrl: 'https://openrouter.ai/api/v1',
  defaultOptions: ChatOpenAIOptions(
    model: 'openai/gpt-4o',
    tools: [tool],
    toolChoice: ChatToolChoice.forced(name: 'joke'),
  ),
);
final outputParser = ToolsOutputParser();

final chain = promptTemplate.pipe(chat).pipe(outputParser);

final stream = chain.stream({'foo': 'bears'});
await for (final chunk in stream) {
  final args = chunk.first.arguments;
  print(args);
}
// {}
// {setup: }
// {setup: Why don't}
// {setup: Why don't bears}
// {setup: Why don't bears like fast food}
// {setup: Why don't bears like fast food?, punchline: }
// {setup: Why don't bears like fast food?, punchline: Because}
// {setup: Why don't bears like fast food?, punchline: Because they can't}
// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
```
diff --git a/docs_v2/docs/05-integrations/openai.md b/docs_v2/docs/05-integrations/openai.md new file mode 100644 index 00000000..6b3ccbbc --- /dev/null +++ b/docs_v2/docs/05-integrations/openai.md @@ -0,0 +1,372 @@ +# OpenAI

This page provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html).

OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models).

> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use Azure with `ChatOpenAI`.

## Setup

To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package.

### Credentials

Head to the [OpenAI Platform](https://platform.openai.com), sign up, and get your [API key](https://platform.openai.com/account/api-keys).
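
The snippets on this page then need access to the key from your app; for example, on platforms where `dart:io` is available you can read it from an environment variable (this is just one option, not something the library requires):

```dart
import 'dart:io';

// Read the key from the OPENAI_API_KEY environment variable.
final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
```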

### Installation

The LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package:

```bash
dart pub add langchain_openai
```

## Usage

### Instantiation

Now we can instantiate our model object and generate chat completions:

```dart
final openaiApiKey = Platform.environment['OPENAI_API_KEY'];

final chatModel = ChatOpenAI(
  apiKey: openaiApiKey,
  defaultOptions: ChatOpenAIOptions(
    model: 'gpt-4o',
    temperature: 0,
    // ...other options
  ),
);
```

If you are using a proxy, you can override the base URL, headers, and other options:

```dart
final client = ChatOpenAI(
  baseUrl: 'https://my-proxy.com',
  headers: {'x-my-proxy-header': 'value'},
);
```

### Invocation

Now you can generate completions by calling the `invoke` method:

```dart
final messages = [
  ChatMessage.system('You are a helpful assistant that translates English to French.'),
  ChatMessage.humanText('I love programming.'),
];
final prompt = PromptValue.chat(messages);
final res = await chatModel.invoke(prompt);
// -> 'J'adore la programmation.'
```

### Chaining

We can chain our model with a prompt template or output parser to create a more complex pipeline:

```dart
final promptTemplate = ChatPromptTemplate.fromTemplates([
  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),
  (ChatMessageType.human, '{text}'),
]);

final chain = promptTemplate | chatModel | StringOutputParser();

final res = await chain.invoke({
  'input_language': 'English',
  'output_language': 'French',
  'text': 'I love programming.',
});
print(res);
// -> 'J'adore la programmation.'
```

### Streaming

OpenAI models support [streaming](/expression_language/streaming.md) the output of the model as it is generated.

```dart
final openaiApiKey = Platform.environment['OPENAI_API_KEY'];

final promptTemplate = ChatPromptTemplate.fromTemplates([
  (
    ChatMessageType.system,
    'You are a helpful assistant that replies only with numbers '
    'in order without any spaces or commas',
  ),
  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
]);

final chat = ChatOpenAI(apiKey: openaiApiKey);

final chain = promptTemplate.pipe(chat).pipe(StringOutputParser());

final stream = chain.stream({'max_num': '9'});
await stream.forEach(print);
// 123
// 456
// 789
```

### Multimodal support

OpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them.

You can send the image as a base64-encoded string:

```dart
final prompt = PromptValue.chat([
  ChatMessage.system('You are a helpful assistant.'),
  ChatMessage.human(
    ChatMessageContent.multiModal([
      ChatMessageContent.text('What fruit is this?'),
      ChatMessageContent.image(
        mimeType: 'image/jpeg',
        data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image
      ),
    ]),
  ),
]);
```

Or you can send the URL where the image is hosted:

```dart
final prompt = PromptValue.chat([
  ChatMessage.system('You are a helpful assistant.'),
  ChatMessage.human(
    ChatMessageContent.multiModal([
      ChatMessageContent.text('What fruit is this?'),
      ChatMessageContent.image(
        data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg',
      ),
    ]),
  ),
]);
```

### Tool calling

OpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) API (we use "tool calling" and "function calling" interchangeably here) that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.

```dart
const tool = ToolSpec(
  name: 'get_current_weather',
  description: 'Get the current weather in a given location',
  inputJsonSchema: {
    'type': 'object',
    'properties': {
      'location': {
        'type': 'string',
        'description': 'The city and country, e.g. San Francisco, US',
      },
    },
    'required': ['location'],
  },
);

final chatModel = ChatOpenAI(
  apiKey: openaiApiKey,
  defaultOptions: ChatOpenAIOptions(
    model: 'gpt-4o',
    temperature: 0,
    tools: [tool],
  ),
);

final res = await chatModel.invoke(
  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),
);
print(res.output.toolCalls);
// [AIChatMessageToolCall{
//   id: a621064b-03b3-4ca6-8278-f37504901034,
//   name: get_current_weather,
//   arguments: {location: Boston, US},
// },
// AIChatMessageToolCall{
//   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,
//   name: get_current_weather,
//   arguments: {location: Madrid, ES},
// }]
```

Notice that the returned `AIChatMessage` has a `toolCalls` field. It contains the requested tool calls in a standardized format that is model-provider agnostic.
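
To complete the round trip, your own code has to execute the requested tool calls and send their results back as tool messages, mirroring the flight-times example shown earlier. A minimal sketch (the `getCurrentWeather` helper is hypothetical, not part of the library):

```dart
// Hypothetical local implementation of the get_current_weather tool.
String getCurrentWeather(final String location) =>
    '{"location": "$location", "temperature": 22, "unit": "celsius"}';

// Send the tool results back to the model to get a natural-language answer.
final followUp = await chatModel.invoke(
  PromptValue.chat([
    ChatMessage.humanText(
      'What’s the weather like in Boston and Madrid right now in celsius?',
    ),
    res.output,
    for (final toolCall in res.output.toolCalls)
      ChatMessage.tool(
        toolCallId: toolCall.id,
        content: getCurrentWeather(toolCall.arguments['location'] as String),
      ),
  ]),
);
print(followUp.output.content);
```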

You can also stream OpenAI tool calls. `ToolsOutputParser` is useful in this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one:

```dart
const tool = ToolSpec(
  name: 'joke',
  description: 'A joke',
  inputJsonSchema: {
    'type': 'object',
    'properties': {
      'setup': {
        'type': 'string',
        'description': 'The setup for the joke',
      },
      'punchline': {
        'type': 'string',
        'description': 'The punchline to the joke',
      },
    },
    'required': ['setup', 'punchline'],
  },
);
final promptTemplate = ChatPromptTemplate.fromTemplate(
  'tell me a long joke about {foo}',
);
final chat = ChatOpenAI(
  apiKey: openaiApiKey,
  defaultOptions: ChatOpenAIOptions(
    tools: [tool],
    toolChoice: ChatToolChoice.forced(name: 'joke'),
  ),
);
final outputParser = ToolsOutputParser();

final chain = promptTemplate.pipe(chat).pipe(outputParser);

final stream = chain.stream({'foo': 'bears'});
await for (final chunk in stream) {
  final args = chunk.first.arguments;
  print(args);
}
// {}
// {setup: }
// {setup: Why don't}
// {setup: Why don't bears}
// {setup: Why don't bears like fast food}
// {setup: Why don't bears like fast food?, punchline: }
// {setup: Why don't bears like fast food?, punchline: Because}
// {setup: Why don't bears like fast food?, punchline: Because they can't}
// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
```

### Structured Outputs

[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key or hallucinating an invalid enum value.

```dart
final prompt = PromptValue.chat([
  ChatMessage.system(
    'Extract the data of any companies mentioned in the '
    'following statement. Return a JSON list.',
  ),
  ChatMessage.humanText(
    'Google was founded in the USA, while Deepmind was founded in the UK',
  ),
]);
final chatModel = ChatOpenAI(
  apiKey: openaiApiKey,
  defaultOptions: ChatOpenAIOptions(
    model: 'gpt-4o',
    temperature: 0,
    responseFormat: ChatOpenAIResponseFormat.jsonSchema(
      ChatOpenAIJsonSchema(
        name: 'Companies',
        description: 'A list of companies',
        strict: true,
        schema: {
          'type': 'object',
          'properties': {
            'companies': {
              'type': 'array',
              'items': {
                'type': 'object',
                'properties': {
                  'name': {'type': 'string'},
                  'origin': {'type': 'string'},
                },
                'additionalProperties': false,
                'required': ['name', 'origin'],
              },
            },
          },
          'additionalProperties': false,
          'required': ['companies'],
        },
      ),
    ),
  ),
);

final res = await chatModel.invoke(prompt);
print(res.output.content);
// {
//   "companies": [
//     {
//       "name": "Google",
//       "origin": "USA"
//     },
//     {
//       "name": "Deepmind",
//       "origin": "UK"
//     }
//   ]
// }
```

When you use `strict: true`, the model outputs will match the supplied schema exactly. Note that strict mode only supports a [subset of JSON Schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under the hood, OpenAI uses a technique known as constrained sampling or constrained decoding: for each JSON Schema, they compute a grammar that represents that schema and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty.
Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute. + +### JSON mode + +When [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map. + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case. + +```dart +final prompt = PromptValue.chat([ + ChatMessage.system( + "Extract the 'name' and 'origin' of any companies mentioned in the " + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), +]); +final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions( + model: 'gpt-4-turbo', + temperature: 0, + responseFormat: ChatOpenAIResponseFormat.jsonObject, + ), +); +final chain = llm.pipe(JsonOutputParser()); +final res = await chain.invoke(prompt); +print(res); +// { +// "companies": [ +// { +// "name": "Google", +// "origin": "USA" +// }, +// { +// "name": "Deepmind", +// "origin": "UK" +// } +// ] +// } +``` + +### Fine-tuning + +You can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter. + +This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example: + +```dart +final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR' + ), +); +``` + +## API reference + +For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest). diff --git a/docs_v2/docs/05-integrations/prem.md b/docs_v2/docs/05-integrations/prem.md new file mode 100644 index 00000000..65258f7c --- /dev/null +++ b/docs_v2/docs/05-integrations/prem.md @@ -0,0 +1,24 @@ +# Prem App + +You can easily run local models using [Prem app](https://www.premai.io/#PremApp). +It creates a local server that exposes a REST API with the same interface as +the OpenAI API. + +```dart +const localUrl = 'http://localhost:8000'; // Check Prem app for the actual URL +final chat = ChatOpenAI(baseUrl: localUrl); + +const template = 'You are a helpful assistant that translates {input_language} to {output_language}.'; +final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template); +const humanTemplate = '{text}'; +final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate); + +final chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]); +final formattedPrompt = chatPrompt.formatPrompt({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.' 
+}).toChatMessages();

final output = await chat.predictMessages(formattedPrompt);
```
diff --git a/docs_v2/docs/05-integrations/together_ai.md b/docs_v2/docs/05-integrations/together_ai.md new file mode 100644 index 00000000..10567455 --- /dev/null +++ b/docs_v2/docs/05-integrations/together_ai.md @@ -0,0 +1,84 @@ +# Together AI

[Together AI](https://www.together.ai) offers a unified OpenAI-compatible API for a broad range of [models](https://api.together.xyz/playground) running serverless or on your own dedicated instances.

It also allows you to fine-tune models on your own data or train new models from scratch.

You can consume the Together AI API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.

The only difference is that you need to change the base URL to `https://api.together.xyz/v1`:

```dart
final chatModel = ChatOpenAI(
  apiKey: togetherAiApiKey,
  baseUrl: 'https://api.together.xyz/v1',
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/Mistral-7B-Instruct-v0.2',
  ),
);
```

## Invoke

```dart
final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];

final promptTemplate = ChatPromptTemplate.fromTemplates(const [
  (
    ChatMessageType.system,
    'You are a helpful assistant that translates {input_language} to {output_language}.',
  ),
  (ChatMessageType.human, '{text}'),
]);

final chatModel = ChatOpenAI(
  apiKey: togetherAiApiKey,
  baseUrl: 'https://api.together.xyz/v1',
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/Mistral-7B-Instruct-v0.2',
  ),
);

final chain = promptTemplate | chatModel | StringOutputParser();

final res = await chain.invoke({
  'input_language': 'English',
  'output_language': 'French',
  'text': 'I love programming.',
});
print(res);
// -> 'J'aime programmer'
```

## Stream

```dart
final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];

final promptTemplate = ChatPromptTemplate.fromTemplates(const [
  (
    ChatMessageType.system,
    'You are a helpful assistant that replies only with numbers '
    'in order without any spaces or commas',
  ),
  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),
]);

final chatModel = ChatOpenAI(
  apiKey: togetherAiApiKey,
  baseUrl: 'https://api.together.xyz/v1',
  defaultOptions: const ChatOpenAIOptions(
    model: 'mistralai/Mistral-7B-Instruct-v0.2',
  ),
);

final chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());

final stream = chain.stream({'max_num': '9'});
await stream.forEach(print);
// 1
// 2
// 3
// ...
// 9
```
diff --git a/docs_v2/docs/05-integrations/tools/index.mdx b/docs_v2/docs/05-integrations/tools/index.mdx new file mode 100644 index 00000000..211b41de --- /dev/null +++ b/docs_v2/docs/05-integrations/tools/index.mdx @@ -0,0 +1,5 @@ +--- +sidebar_position: 0 +index: auto +--- +# Tools \ No newline at end of file
diff --git a/docs_v2/docs/05-integrations/tools/tavily_search.md b/docs_v2/docs/05-integrations/tools/tavily_search.md new file mode 100644 index 00000000..2f3d461d --- /dev/null +++ b/docs_v2/docs/05-integrations/tools/tavily_search.md @@ -0,0 +1,13 @@ +# Tavily Search

[Tavily's Search API](https://tavily.com) is a search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.

## Setup

The integration lives in the `langchain_community` package:
+ +```bash +dart pub add langchain langchain_community +``` + +We also need to set our Tavily API key. \ No newline at end of file diff --git a/docs_v2/docusaurus.config.js b/docs_v2/docusaurus.config.js new file mode 100644 index 00000000..1376cddc --- /dev/null +++ b/docs_v2/docusaurus.config.js @@ -0,0 +1,130 @@ +// @ts-check +// `@type` JSDoc annotations allow editor autocompletion and type checking +// (when paired with `@ts-check`). +// There are various equivalent ways to declare your Docusaurus config. +// See: https://docusaurus.io/docs/api/docusaurus-config + +import { themes as prismThemes } from 'prism-react-renderer'; + +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: 'LangChain.dart', + tagline: 'LangChain.dart Docs', + favicon: 'img/favicon.ico', + + // Set the production url of your site here + url: 'https://beta.langchaindart.dev/', + // Set the // pathname under which your site is served + // For GitHub pages deployment, it is often '//' + baseUrl: '/', + + // GitHub pages deployment config. + // If you aren't using GitHub pages, you don't need these. + organizationName: 'davidmigloz', // Usually your GitHub org/user name. + projectName: 'langchain_dart', // Usually your repo name. + + onBrokenLinks: 'warn', + onBrokenMarkdownLinks: 'warn', + + // Even if you don't use internationalization, you can use this field to set + // useful metadata like html lang. For example, if your site is Chinese, you + // may want to replace "en" with "zh-Hans". + i18n: { + defaultLocale: 'en', + locales: ['en'], + }, + + presets: [ + [ + 'classic', + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + docs: { + sidebarPath: './sidebars.js', + // Please change this to your repo. + // Remove this to remove the "edit this page" links. 
+ editUrl: + 'https://github.com/davidmigloz/langchain_dart/tree/main/docs_v2/', + }, + theme: { + customCss: './src/css/custom.css', + }, + }), + ], + ], + + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + // Replace with your project's social card + image: 'img/langchain.dart.png', + navbar: { + title: 'LangChain.dart', + logo: { + alt: 'LangChain Logo', + src: 'img/favicon.ico', + }, + items: [ + { + to: '/docs/integrations', + position: 'left', + label: 'Integrations', + }, + {to: 'https://blog.langchaindart.dev/blog', label: 'Blog', position: 'left'}, + { + href: 'https://github.com/davidmigloz/langchain_dart/', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Docs', + items: [ + { + label: 'Tutorial', + to: '/docs/intro', + }, + ], + }, + { + title: 'Community', + items: [ + { + label: 'Discord', + href: 'https://discord.gg/x4qbhqecVR', + }, + { + label: 'pub.dev', + href: 'https://pub.dev/packages/langchain', + }, + ], + }, + { + title: 'More', + items: [ + { + label: 'Blog', + to: '/blog', + }, + { + label: 'GitHub', + href: 'https://github.com/davidmigloz/langchain_dart', + }, + ], + }, + ], + copyright: `Made with 💙 by the LangChain.dart Community`, + }, + prism: { + theme: prismThemes.vsLight, + darkTheme: prismThemes.vsDark, + additionalLanguages: ['yaml','dart','bash'], + }, + }), +}; + +export default config; diff --git a/docs_v2/firebase.json b/docs_v2/firebase.json new file mode 100644 index 00000000..340ed5b7 --- /dev/null +++ b/docs_v2/firebase.json @@ -0,0 +1,16 @@ +{ + "hosting": { + "public": "build", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ] + } +} diff --git a/docs_v2/package-lock.json b/docs_v2/package-lock.json new file mode 100644 index 00000000..ebb2ff9e --- /dev/null +++ b/docs_v2/package-lock.json @@ -0,0 +1,14678 @@ +{ + "name": "langchain-dart", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "langchain-dart", + "version": "0.0.0", + "dependencies": { + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/types": "^3.5.2" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@algolia/autocomplete-core": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", + "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", + "@algolia/autocomplete-shared": "1.9.3" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", + "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-preset-algolia": { + "version": "1.9.3", + "resolved": 
"https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", + "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-shared": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", + "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/cache-browser-local-storage": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz", + "integrity": "sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==", + "dependencies": { + "@algolia/cache-common": "4.24.0" + } + }, + "node_modules/@algolia/cache-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.24.0.tgz", + "integrity": "sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==" + }, + "node_modules/@algolia/cache-in-memory": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz", + "integrity": "sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==", + "dependencies": { + "@algolia/cache-common": "4.24.0" + } + }, + "node_modules/@algolia/client-account": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.24.0.tgz", + "integrity": "sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", + "integrity": "sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + 
"node_modules/@algolia/client-analytics/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.5.3.tgz", + "integrity": "sha512-3rdSdreBL2LGYu4DWmUGlMhaGy1vy36Xp42LdbTFsW/y3bhW5viptMHI5A3PKT0hPEMZUn+te1iM/EWvLUuVGQ==", + "peer": true, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", + "integrity": "sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-personalization/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.5.3.tgz", + "integrity": "sha512-qrokD+uoNxchbiF9aP8niQd/9SZ6BgYg4WaesFaubHhr9DFvwGm4IePEMha8vQcc3fSsY6uL+gOtKB3J6RF0NQ==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3", + "@algolia/requester-browser-xhr": "5.5.3", + "@algolia/requester-fetch": "5.5.3", + "@algolia/requester-node-http": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" + }, + "node_modules/@algolia/logger-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.24.0.tgz", + "integrity": "sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==" + }, + "node_modules/@algolia/logger-console": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.24.0.tgz", + "integrity": "sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==", + "dependencies": { + "@algolia/logger-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend": { + "version": "4.24.0", + "resolved": 
"https://registry.npmjs.org/@algolia/recommend/-/recommend-4.24.0.tgz", + "integrity": "sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.5.3.tgz", + "integrity": "sha512-LsfUPokiXEpvlYF7SwNjyyjkUX7IoW7oIhH6WkDUD4PCfEZkFbVplGQA0UrCiWOAbpb25P7mmP6+ldwjwqW6Kg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", + "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==" + }, + "node_modules/@algolia/requester-fetch": { + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.5.3.tgz", + "integrity": "sha512-RKaliEFHtVeD/fMxwrApkcI6ZxR+mU6pZna29r3NwVMpCXTJWWtlMpQmbr1RHzUsaAlpfv9pfGJN4nYPE8XWEg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-node-http": { + "version": "5.5.3", + "resolved": 
"https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.5.3.tgz", + "integrity": "sha512-2wU+HlTVrVce7BMW2b3Gd62btk8B0jBbuKYYzu3OFeBD/aZa88eHABtjcjQCdw3x+wvkIPEc56UsZx9eHYLebg==", + "peer": true, + "dependencies": { + "@algolia/client-common": "5.5.3" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/transporter": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.24.0.tgz", + "integrity": "sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==", + "dependencies": { + "@algolia/cache-common": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", + "dependencies": { + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.7.tgz", + "integrity": "sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.7.tgz", + "integrity": "sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helpers": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", + "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", + "dependencies": { + "@babel/types": "^7.24.7", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + 
"version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz", + "integrity": "sha512-BaDeOonYvhdKw+JoMVkAixAAJzG2jVPIwWoKBPdYuY9b452e2rPuI9QPYh3KpofZ3pW2akOmwZLOiOsHMiqRAg==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz", + "integrity": "sha512-xZeCVVdwb4MsDBkkyZ64tReWYrLRHlMN72vP7Bdm3OUOuyFZExhsHUUnuWnm2/XOlAJzR0LfPpB56WXZn0X/lA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.24.7.tgz", + "integrity": "sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==", + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.7.tgz", + "integrity": "sha512-kTkaDl7c9vO80zeX1rJxnuRpEsD5tA81yh11X1gQo+PhSti3JS+7qeZo9U4RHobKRiFPKaGK3svUAeb8D0Q7eg==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.24.7.tgz", + "integrity": "sha512-03TCmXy2FtXJEZfbXDTSqq1fRJArk7lX9DOFC/47VthYcxyIOx+eXQmdo6DOQvrbpIix+KfXwvuXdFDZHxt+rA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + 
"node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", + "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", + "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", + "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.7.tgz", + "integrity": "sha512-LGeMaf5JN4hAT471eJdBs/GK1DoYIJ5GCtZN/EsL6KUiiDZOvO/eKE11AMZJa2zP4zk4qe9V2O/hxAmkRc8p6w==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.7.tgz", + "integrity": "sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-split-export-declaration": 
"^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz", + "integrity": "sha512-jKiTsW2xmWwxT1ixIdfXUZp+P5yURx2suzLZr5Hi64rURpDYdMW0pv+Uf17EYk2Rd428Lx4tLsnjGJzYKDM/6A==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.24.7.tgz", + "integrity": "sha512-9pKLcTlZ92hNZMQfGCHImUpDOlAgkkpqalWEeftW5FBya75k8Li2ilerxkM/uBEj01iBZXcCIB/bwvDYgWyibA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-wrap-function": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.24.7.tgz", + "integrity": "sha512-qTAxxBM81VEyoAY0TtLrx1oAEJc09ZK67Q9ljQToqCnA+55eNwCORaxlKyu+rNfX86o8OXRUSNUnrtsAZXM9sg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.7", + "@babel/helper-optimise-call-expression": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz", + "integrity": "sha512-IO+DLT3LQUElMbpzlatRASEyQtfhSE0+m465v++3jyyXeBTBUjtVZg28/gHeV5mrTJqvEKhKroBGAvhW+qPHiQ==", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", + "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", + "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.7.tgz", + "integrity": "sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.24.7.tgz", + "integrity": "sha512-N9JIYk3TD+1vq/wn77YnJOqMtfWhNewNE+DJV4puD2X7Ew9J4JvrzrFDfTfyv5EgEXVy9/Wt8QiOErzEmv5Ifw==", + "dependencies": { + "@babel/helper-function-name": "^7.24.7", + "@babel/template": "^7.24.7", + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.7.tgz", + "integrity": "sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==", + "dependencies": { + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", + "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.7.tgz", + "integrity": "sha512-TiT1ss81W80eQsN+722OaeQMY/G4yTb4G9JrqeiDADs3N8lbPMGldWi9x8tyqCW5NLx1Jh2AvkE6r6QvEltMMQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.7.tgz", + "integrity": "sha512-unaQgZ/iRu/By6tsjMZzpeBZjChYfLYry6HrEXPoz3KmfF0sVBQ1l8zKMQ4xRGLWVsjuvB8nQfjNP/DcfEOCsg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz", + "integrity": "sha512-+izXIbke1T33mY4MSNnrqhPXDz01WYhEf3yF5NbnUtkiNnm+XBZJl3kNfoK6NKmYlz/D07+l2GWVK/QfDkNCuQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.24.7", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.7.tgz", + "integrity": "sha512-utA4HuR6F4Vvcr+o4DnjL8fCOlgRFGbeeBEGNg3ZTrLFw6VWG5XmUrvcQ0FjIYMU2ST4XcR2Wsp7t9qOAPnxMg==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.7.tgz", + "integrity": 
"sha512-Ec3NRUMoi8gskrkBe3fNmEQfxDvY8bgfQpz6jlk/41kX9eUjvpyqWU7PBP/pLAvMaSQjbMNKJmvX57jP+M6bPg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.7.tgz", + "integrity": "sha512-hbX+lKKeUMGihnK8nvKqmXBInriT3GVjzXKFriV3YC6APGxMbP8RZNFwy91+hocLXq90Mta+HshoB31802bb8A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", + "integrity": "sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + 
"version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.7.tgz", + "integrity": "sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz", + 
"integrity": "sha512-Dt9LQs6iEY++gXUwY03DNFat5C2NbO48jj+j/bSAz6b3HgPs39qcPiYt77fDObIcFwj3/C2ICX9YMwGflUoSHQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.7.tgz", + "integrity": "sha512-o+iF77e3u7ZS4AoAuJvapz9Fm001PuD2V3Lp6OSE4FYQke+cSewYtnek+THqGRWyQloRCyvWL1OkyfNEl9vr/g==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz", + "integrity": "sha512-SQY01PcJfmQ+4Ash7NE+rpbLFbmqA2GPIgqzxfFTL4t1FKRq4zTms/7htKpoCUI9OcFYgzqfmCdH53s6/jn5fA==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz", + "integrity": "sha512-yO7RAz6EsVQDaBH18IDJcMB1HnrUn2FJ/Jslc/WtPPWcjhpUJXU/rjbwmluzp7v/ZzWcEhTMXELnnsz8djWDwQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.7.tgz", + "integrity": "sha512-Nd5CvgMbWc+oWzBsuaMcbwjJWAcp5qzrbg69SZdHSP7AMY0AbWFqFO0WTFCA1jxhMCwodRwvRec8k0QUbZk7RQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.7.tgz", + "integrity": "sha512-vKbfawVYayKcSeSR5YYzzyXvsDFWU2mD8U5TFeXtbCPLFUqe7GyCgvO6XDHzje862ODrOwy6WCPmKeWHbCFJ4w==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz", + "integrity": "sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + 
"@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.7.tgz", + "integrity": "sha512-CFbbBigp8ln4FU6Bpy6g7sE8B/WmCmzvivzUC6xDAdWVsjYTXijpuuGJmYkAaoWAzcItGKT3IOAbxRItZ5HTjw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz", + "integrity": "sha512-25cS7v+707Gu6Ds2oY6tCkUwsJ9YIDbggd9+cu9jzzDgiNq7hR/8dkzxWfKWnTic26vsI3EsCXNd4iEB6e8esQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/template": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.7.tgz", + "integrity": "sha512-19eJO/8kdCQ9zISOf+SEUJM/bAUIsvY3YDnXZTupUCQ8LgrWnsG/gFB9dvXqdXnRXMAM8fvt7b0CBKQHNGy1mw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz", + "integrity": "sha512-ZOA3W+1RRTSWvyqcMJDLqbchh7U4NRGqwRfFSVbOLS/ePIP4vHB5e8T8eXcuqyN1QkgKyj5wuW0lcS85v4CrSw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz", + "integrity": "sha512-JdYfXyCRihAe46jUIliuL2/s0x0wObgwwiGxw/UbgJBr20gQBThrokO4nYKgWkD7uBaqM7+9x5TU7NkExZJyzw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz", + "integrity": "sha512-sc3X26PhZQDb3JhORmakcbvkeInvxz+A8oda99lj7J60QRuPZvNAk9wQlTBS1ZynelDrDmTU4pw1tyc5d5ZMUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz", + "integrity": "sha512-Rqe/vSc9OYgDajNIK35u7ot+KeCoetqQYFXM4Epf7M7ez3lWlOjrDjrwMei6caCVhfdw+mIKD4cgdGNy5JQotQ==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz", + "integrity": "sha512-v0K9uNYsPL3oXZ/7F9NNIbAj2jv1whUEtyA6aujhekLs56R++JDQuzRcP2/z4WX5Vg/c5lE9uWZA0/iUoFhLTA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz", + "integrity": "sha512-wo9ogrDG1ITTTBsy46oGiN1dS9A7MROBTcYsfS8DtsImMkHk9JXJ3EWQM6X2SUw4x80uGPlwj0o00Uoc6nEE3g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.7.tgz", + "integrity": "sha512-U9FcnA821YoILngSmYkW6FjyQe2TyZD5pHt4EVIhmcTkrJw/3KqcrRSxuOo5tFZJi7TE19iDyI1u+weTI7bn2w==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz", + "integrity": "sha512-2yFnBGDvRuxAaE/f0vfBKvtnvvqU8tGpMHqMNpTN2oWMKIR3NqFkjaAgGwawhqK/pIN2T3XdjGPdaG0vDhOBGw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.7.tgz", + "integrity": "sha512-vcwCbb4HDH+hWi8Pqenwnjy+UiklO4Kt1vfspcQYFhJdpthSnW8XvWGyDZWKNVrVbVViI/S7K9PDJZiUmP2fYQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz", + "integrity": "sha512-4D2tpwlQ1odXmTEIFWy9ELJcZHqrStlzK/dAOWYyxX3zT0iXQB6banjgeOJQXzEc4S0E0a5A+hahxPaEFYftsw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz", + "integrity": "sha512-T/hRC1uqrzXMKLQ6UCwMT85S3EvqaBXDGf0FaMf4446Qx9vKwlghvee0+uuZcDUCZU5RuNi4781UQ7R308zzBw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz", + "integrity": "sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.7.tgz", + "integrity": "sha512-iFI8GDxtevHJ/Z22J5xQpVqFLlMNstcLXh994xifFwxxGslr2ZXXLWgtBeLctOD63UFDArdvN6Tg8RFw+aEmjQ==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.7.tgz", + "integrity": "sha512-GYQE0tW7YoaN13qFh3O1NCY4MPkUiAH3fiF7UcV/I3ajmDKEdG3l+UOcbAm4zUE3gnvUU+Eni7XrVKo9eO9auw==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz", + "integrity": "sha512-3aytQvqJ/h9z4g8AsKPLvD4Zqi2qT+L3j7XoFFu1XBlZWEl2/1kWnhmAbxpLgPrHSY0M6UA02jyTiwUVtiKR6A==", + "dependencies": { + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz", + "integrity": "sha512-/jr7h/EWeJtk1U/uz2jlsCioHkZk1JJZVcc8oQsJ1dUlaJD83f4/6Zeh2aHt9BIFokHIsSeDfhUmju0+1GPd6g==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz", + "integrity": "sha512-RNKwfRIXg4Ls/8mMTza5oPF5RkOW8Wy/WgMAp1/F1yZ8mMbtwXW+HDoJiOsagWrAhI5f57Vncrmr9XeT4CVapA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz", + "integrity": "sha512-Ts7xQVk1OEocqzm8rHMXHlxvsfZ0cEF2yomUqpKENHWMF4zKk175Y4q8H5knJes6PgYad50uuRmt3UJuhBw8pQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz", + "integrity": "sha512-e6q1TiVUzvH9KRvicuxdBTUj4AdKSRwzIyFFnfnezpCfP2/7Qmbb8qbU2j7GODbl4JMkblitCQjKYUaX/qkkwA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz", + "integrity": "sha512-4QrHAr0aXQCEFni2q4DqKLD31n2DL+RxcwnNjDFkSG0eNQ/xCavnRkfCUjsyqGC2OviNJvZOF/mQqZBw7i2C5Q==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz", + "integrity": "sha512-A/vVLwN6lBrMFmMDmPPz0jnE6ZGx7Jq7d6sT/Ev4H65RER6pZ+kczlf1DthF5N0qaPHBsI7UXiE8Zy66nmAovg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz", + "integrity": 
"sha512-uLEndKqP5BfBbC/5jTwPxLh9kqPWWgzN/f8w6UwAIirAEqiIVJWWY312X72Eub09g5KF9+Zn7+hT7sDxmhRuKA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.7.tgz", + "integrity": "sha512-tK+0N9yd4j+x/4hxF3F0e0fu/VdcxU18y5SevtyM/PCFlQvXbR0Zmlo2eBrKtVipGNFzpq56o8WsIIKcJFUCRQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz", + "integrity": "sha512-yGWW5Rr+sQOhK0Ot8hjDJuxU3XLRQGflvT4lhlSY0DFvdb3TwKaY26CJzHtYllU0vT9j58hc37ndFPsqT1SrzA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.7.tgz", + "integrity": "sha512-COTCOkG2hn4JKGEKBADkA8WNb35TGkkRbI5iT845dB+NyqgO8Hn+ajPbSnIQznneJTa3d30scb6iz/DhH8GsJQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz", + "integrity": "sha512-9z76mxwnwFxMyxZWEgdgECQglF2Q7cFLm0kMf8pGwt+GSJsY0cONKj/UuO4bOH0w/uAel3ekS4ra5CEAyJRmDA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz", + "integrity": "sha512-EMi4MLQSHfd2nrCqQEWxFdha2gBCqU4ZcCng4WBGZ5CJL4bBRW0ptdqqDdeirGZcpALazVVNJqRmsO8/+oNCBA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.25.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.1.tgz", + "integrity": "sha512-SLV/giH/V4SmloZ6Dt40HjTGTAIkxn33TVIHxNGNvo8ezMhrxBkzisj4op1KZYPIOHFLqhv60OHvX+YRu4xbmQ==", + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.7.tgz", + "integrity": "sha512-H/Snz9PFxKsS1JLI4dJLtnJgCJRoo0AUm3chP6NYr+9En1JMKloheEiLIhlp5MDVznWo+H3AAC1Mc8lmUEpsgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.24.7.tgz", + "integrity": "sha512-+Dj06GDZEFRYvclU6k4bme55GKBEWUmByM/eoKuqg4zTNQHiApWRhQph5fxQB2wAEFvRzL1tOEj1RJ19wJrhoA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.24.7.tgz", + "integrity": "sha512-QG9EnzoGn+Qar7rxuW+ZOsbWOt56FvvI93xInqsZDC5fsekx1AlIO4KIJ5M+D0p0SqSH156EpmZyXq630B8OlQ==", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.7.tgz", + "integrity": "sha512-PLgBVk3fzbmEjBJ/u8kFzOqS9tUeDjiaWud/rRym/yjCo/M9cASPlnrd2ZmmZpQT40fOOrvR8jh+n8jikrOhNA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.7.tgz", + "integrity": "sha512-lq3fvXPdimDrlg6LWBoqj+r/DEWgONuwjuOuQCSYgRroXDH/IdM1C0IZf59fL5cHLpjEH/O6opIRBbqv7ELnuA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz", + "integrity": "sha512-0DUq0pHcPKbjFZCfTss/pGkYMfy3vFWydkUBd9r0GHpIyfs2eCDENvqadMycRS9wZCXR41wucAfJHJmwA0UmoQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.7.tgz", + "integrity": "sha512-YqXjrk4C+a1kZjewqt+Mmu2UuV1s07y8kqcUf4qYLnoqemhR4gRQikhdAhSVJioMjVTu6Mo6pAbaypEA3jY6fw==", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.1", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz", + "integrity": "sha512-KsDsevZMDsigzbA09+vacnLpmPH4aWjcZjXdyFKGzpplxhbeB4wYtury3vglQkg6KM/xEPKt73eCjPPf1PgXBA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz", + "integrity": "sha512-x96oO0I09dgMDxJaANcRyD4ellXFLLiWhuwDxKZX5g2rWP1bTPkBSwCYv96VDXVT1bD9aPj8tppr5ITIh8hBng==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz", + "integrity": "sha512-kHPSIJc9v24zEml5geKg9Mjx5ULpfncj0wRpYtxbvKyTtHCYDkVE3aHQ03FrpEo4gEe2vrJJS1Y9CJTaThA52g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz", + "integrity": "sha512-AfDTQmClklHCOLxtGoP7HkeMw56k1/bTQjwsfhL6pppo/M4TOBSq+jjBUBLmV/4oeFg4GWMavIl44ZeCtmmZTw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.7.tgz", + "integrity": "sha512-VtR8hDy7YLB7+Pet9IarXjg/zgCMSF+1mNS/EQEiEaUPoFXCVsHG64SIxcaaI2zJgRiv+YmgaQESUfWAdbjzgg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.7.tgz", + "integrity": "sha512-iLD3UNkgx2n/HrjBesVbYX6j0yqn/sJktvbtKKgcaLIQ4bTTQ8obAypc1VpyHPD2y4Phh9zHOaAt8e/L14wCpw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz", + "integrity": "sha512-U3ap1gm5+4edc2Q/P+9VrBNhGkfnf+8ZqppY71Bo/pzZmXhhLdqgaUl6cuB07O1+AQJtCLfaOmswiNbSQ9ivhw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz", + "integrity": "sha512-uH2O4OV5M9FZYQrwc7NdVmMxQJOCCzFeYudlZSzUAHRFeOujQefa92E74TQDVskNHCzOXoigEuoyzHDhaEaK5w==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz", + "integrity": "sha512-hlQ96MBZSAXUq7ltkjtu3FJCCSMx/j629ns3hA3pXnBXjanNP0LHi+JpPeA81zaWgVK1VGH95Xuy7u0RyQ8kMg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.7.tgz", + "integrity": "sha512-2G8aAvF4wy1w/AGZkemprdGMRg5o6zPNhbHVImRz3lss55TYCBd6xStN19rt8XJHq20sqV0JbyWjOWwQRwV/wg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.7.tgz", + "integrity": "sha512-1YZNsc+y6cTvWlDHidMBsQZrZfEFjRIo/BZCT906PMdzOyXtSLTgqGdrpcuTDCXyd11Am5uQULtDIcCfnTc8fQ==", + "dependencies": { + "@babel/compat-data": "^7.24.7", + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.7", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.7", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.7", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.7", + 
"@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.24.7", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.7", + "@babel/plugin-transform-async-generator-functions": "^7.24.7", + "@babel/plugin-transform-async-to-generator": "^7.24.7", + "@babel/plugin-transform-block-scoped-functions": "^7.24.7", + "@babel/plugin-transform-block-scoping": "^7.24.7", + "@babel/plugin-transform-class-properties": "^7.24.7", + "@babel/plugin-transform-class-static-block": "^7.24.7", + "@babel/plugin-transform-classes": "^7.24.7", + "@babel/plugin-transform-computed-properties": "^7.24.7", + "@babel/plugin-transform-destructuring": "^7.24.7", + "@babel/plugin-transform-dotall-regex": "^7.24.7", + "@babel/plugin-transform-duplicate-keys": "^7.24.7", + "@babel/plugin-transform-dynamic-import": "^7.24.7", + "@babel/plugin-transform-exponentiation-operator": "^7.24.7", + "@babel/plugin-transform-export-namespace-from": "^7.24.7", + "@babel/plugin-transform-for-of": "^7.24.7", + "@babel/plugin-transform-function-name": "^7.24.7", + "@babel/plugin-transform-json-strings": "^7.24.7", + "@babel/plugin-transform-literals": "^7.24.7", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.7", + "@babel/plugin-transform-member-expression-literals": "^7.24.7", + "@babel/plugin-transform-modules-amd": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-modules-systemjs": "^7.24.7", + "@babel/plugin-transform-modules-umd": "^7.24.7", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.24.7", + "@babel/plugin-transform-new-target": "^7.24.7", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.7", + "@babel/plugin-transform-numeric-separator": "^7.24.7", + "@babel/plugin-transform-object-rest-spread": "^7.24.7", + "@babel/plugin-transform-object-super": "^7.24.7", + "@babel/plugin-transform-optional-catch-binding": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7", + "@babel/plugin-transform-parameters": "^7.24.7", + "@babel/plugin-transform-private-methods": "^7.24.7", + "@babel/plugin-transform-private-property-in-object": "^7.24.7", + "@babel/plugin-transform-property-literals": "^7.24.7", + "@babel/plugin-transform-regenerator": "^7.24.7", + "@babel/plugin-transform-reserved-words": "^7.24.7", + "@babel/plugin-transform-shorthand-properties": "^7.24.7", + "@babel/plugin-transform-spread": "^7.24.7", + "@babel/plugin-transform-sticky-regex": "^7.24.7", + "@babel/plugin-transform-template-literals": 
"^7.24.7", + "@babel/plugin-transform-typeof-symbol": "^7.24.7", + "@babel/plugin-transform-unicode-escapes": "^7.24.7", + "@babel/plugin-transform-unicode-property-regex": "^7.24.7", + "@babel/plugin-transform-unicode-regex": "^7.24.7", + "@babel/plugin-transform-unicode-sets-regex": "^7.24.7", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.4", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.24.7.tgz", + "integrity": "sha512-AAH4lEkpmzFWrGVlHaxJB7RLH21uPQ9+He+eFLWHmF9IuFQVugz8eAsamaW0DXRrTfco5zj1wWtpdcXJUOfsag==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-transform-react-display-name": "^7.24.7", + "@babel/plugin-transform-react-jsx": "^7.24.7", + "@babel/plugin-transform-react-jsx-development": "^7.24.7", + "@babel/plugin-transform-react-pure-annotations": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz", + "integrity": "sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", + "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.24.7.tgz", + "integrity": "sha512-eytSX6JLBY6PVAeQa2bFlDx/7Mmln/gaEpsit5a3WEvjGfiIytEsgAwuIXCPM0xvw0v0cJn3ilq0/TvXrW0kgA==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", + "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", + "integrity": "sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", + "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", + "dependencies": { + "@babel/helper-string-parser": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/css": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.1.tgz", + "integrity": "sha512-VtVb5DS+0hRIprU2CO6ZQjK2Zg4QU5HrDM1+ix6rT0umsYvFvatMAnf97NHZlVWDaaLlx7GRfR/7FikANiM2Fg==" + }, + "node_modules/@docsearch/react": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.1.tgz", + "integrity": "sha512-qXZkEPvybVhSXj0K7U3bXc233tk5e8PfhoZ6MhPOiik/qUQxYC+Dn9DnoS7CxHQQhHfCvTiN0eY9M12oRghEXw==", + "dependencies": { + "@algolia/autocomplete-core": "1.9.3", + "@algolia/autocomplete-preset-algolia": "1.9.3", + "@docsearch/css": "3.6.1", + "algoliasearch": "^4.19.1" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 19.0.0", + "react": ">= 16.8.0 < 19.0.0", + "react-dom": ">= 16.8.0 < 19.0.0", + "search-insights": ">= 1 < 3" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "search-insights": { + "optional": true + } + } + }, 
+ "node_modules/@docusaurus/core": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.5.2.tgz", + "integrity": "sha512-4Z1WkhCSkX4KO0Fw5m/Vuc7Q3NxBG53NE5u59Rs96fWkMPZVSrzEPP16/Nk6cWb/shK7xXPndTmalJtw7twL/w==", + "dependencies": { + "@babel/core": "^7.23.3", + "@babel/generator": "^7.23.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.22.9", + "@babel/preset-env": "^7.22.9", + "@babel/preset-react": "^7.22.5", + "@babel/preset-typescript": "^7.22.5", + "@babel/runtime": "^7.22.6", + "@babel/runtime-corejs3": "^7.22.6", + "@babel/traverse": "^7.22.8", + "@docusaurus/cssnano-preset": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "autoprefixer": "^10.4.14", + "babel-loader": "^9.1.3", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.2", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.31.1", + "css-loader": "^6.8.1", + "css-minimizer-webpack-plugin": "^5.0.1", + "cssnano": "^6.1.2", + "del": "^6.1.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "eval": "^0.1.8", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "html-minifier-terser": "^7.2.0", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.5.3", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.7.6", + "p-map": "^4.0.0", + "postcss": "^8.4.26", + "postcss-loader": "^7.3.3", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "rtl-detect": "^1.0.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.5", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "url-loader": "^4.1.1", + "webpack": "^5.88.1", + "webpack-bundle-analyzer": "^4.9.0", + "webpack-dev-server": "^4.15.1", + "webpack-merge": "^5.9.0", + "webpackbar": "^5.0.2" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@mdx-js/react": "^3.0.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.2.tgz", + "integrity": "sha512-D3KiQXOMA8+O0tqORBrTOEQyQxNIfPm9jEaJoALjjSjc2M/ZAWcUfPQEnwr2JB2TadHw2gqWgpZckQmrVWkytA==", + "dependencies": { + "cssnano-preset-advanced": "^6.1.2", + "postcss": "^8.4.38", + "postcss-sort-media-queries": "^5.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.5.2.tgz", + "integrity": "sha512-LHC540SGkeLfyT3RHK3gAMK6aS5TRqOD4R72BEU/DE2M/TY8WwEUAMY576UUc/oNJXv8pGhBmQB6N9p3pt8LQw==", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.5.2.tgz", + "integrity": "sha512-ku3xO9vZdwpiMIVd8BzWV0DCqGEbCP5zs1iHfKX50vw6jX8vQo0ylYo1YJMZyz6e+JFJ17HYHT5FzVidz2IflA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@mdx-js/mdx": "^3.0.0", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^3.0.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "image-size": "^1.0.2", + "mdast-util-mdx": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "rehype-raw": "^7.0.0", + "remark-directive": "^3.0.0", + "remark-emoji": "^4.0.0", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.0", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0", + "url-loader": "^4.1.1", + "vfile": "^6.0.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.2.tgz", + "integrity": "sha512-Z+Xu3+2rvKef/YKTMxZHsEXp1y92ac0ngjDiExRdqGTmEKtCUpkbNYH8v5eXo5Ls+dnW88n6WTa+Q54kLOkwPg==", + "dependencies": { + "@docusaurus/types": "3.5.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.2.tgz", + "integrity": "sha512-R7ghWnMvjSf+aeNDH0K4fjyQnt5L0KzUEnUhmf1e3jZrv3wogeytZNN6n7X8yHcMsuZHPOrctQhXWnmxu+IRRg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "cheerio": "1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "reading-time": "^1.5.0", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^5.0.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.2.tgz", + "integrity": "sha512-Bt+OXn/CPtVqM3Di44vHjE7rPCEsRCB/DMo2qoOuozB9f7+lsdrHvD0QCHdBs0uhz6deYJDppAr2VgqybKPlVQ==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + 
"webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.2.tgz", + "integrity": "sha512-WzhHjNpoQAUz/ueO10cnundRz+VUtkjFhhaQ9jApyv1a46FPURO4cef89pyNIOMny1fjDz/NUN2z6Yi+5WUrCw==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-debug": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.5.2.tgz", + "integrity": "sha512-kBK6GlN0itCkrmHuCS6aX1wmoWc5wpd5KJlqQ1FyrF0cLDnvsYSnh7+ftdwzt7G6lGBho8lrVwkkL9/iQvaSOA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "fs-extra": "^11.1.1", + "react-json-view-lite": "^1.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.2.tgz", + "integrity": "sha512-rjEkJH/tJ8OXRE9bwhV2mb/WP93V441rD6XnM6MIluu7rk8qg38iSxS43ga2V2Q/2ib53PcqbDEJDG/yWQRJhQ==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.2.tgz", + "integrity": "sha512-lm8XL3xLkTPHFKKjLjEEAHUrW0SZBSHBE1I+i/tmYMBsjCcUB5UJ52geS5PSiOCFVR74tbPGcPHEV/gaaxFeSA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.2.tgz", + "integrity": "sha512-QkpX68PMOMu10Mvgvr5CfZAzZQFx8WLlOiUQ/Qmmcl6mjGK6H21WLT5x7xDmcpCoKA/3CegsqIqBR+nA137lQg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.2.tgz", + "integrity": "sha512-DnlqYyRAdQ4NHY28TfHuVk414ft2uruP4QWCH//jzpHjqvKyXjj2fmDtI8RPUBh9K8iZKFMHRnLtzJKySPWvFA==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + 
"@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.5.2.tgz", + "integrity": "sha512-3ihfXQ95aOHiLB5uCu+9PRy2gZCeSZoDcqpnDvf3B+sTrMvMTr8qRUzBvWkoIqc82yG5prCboRjk1SVILKx6sg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/plugin-debug": "3.5.2", + "@docusaurus/plugin-google-analytics": "3.5.2", + "@docusaurus/plugin-google-gtag": "3.5.2", + "@docusaurus/plugin-google-tag-manager": "3.5.2", + "@docusaurus/plugin-sitemap": "3.5.2", + "@docusaurus/theme-classic": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-search-algolia": "3.5.2", + "@docusaurus/types": "3.5.2" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.5.2.tgz", + "integrity": "sha512-XRpinSix3NBv95Rk7xeMF9k4safMkwnpSgThn0UNQNumKvmcIYjfkwfh2BhwYh/BxMXQHJ/PdmNh22TQFpIaYg==", + "dependencies": { + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "copy-text-to-clipboard": "^3.2.0", + "infima": "0.2.0-alpha.44", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.4.26", + "prism-react-renderer": "^2.3.0", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.5.2.tgz", + "integrity": "sha512-QXqlm9S6x9Ibwjs7I2yEDgsCocp708DrCrgHgKwg2n2AY0YQ6IjU0gAK35lHRLOvAoJUfCKpQAwUykB0R7+Eew==", + "dependencies": { + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^2.0.0", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^2.3.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.2.tgz", + "integrity": "sha512-qW53kp3VzMnEqZGjakaV90sst3iN1o32PH+nawv1uepROO8aEGxptcq2R5rsv7aBShSRbZwIobdvSYKsZ5pqvA==", + "dependencies": { + "@docsearch/react": "^3.5.2", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "algoliasearch": "^4.18.0", + "algoliasearch-helper": "^3.13.3", + "clsx": "^2.0.0", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.5.2.tgz", + "integrity": "sha512-GPZLcu4aT1EmqSTmbdpVrDENGR2yObFEX8ssEFYTCiAIVc0EihNSdOIBTazUvgNqwvnoU1A8vIs1xyzc3LITTw==", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/types": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.5.2.tgz", + "integrity": "sha512-N6GntLXoLVUwkZw7zCxwy9QiuEXIcTVzA9AkmNw16oc0AP3SXLrMmDMMBIfgqwuKWa6Ox6epHol9kMtJqekACw==", + "dependencies": { + "@mdx-js/mdx": "^3.0.0", + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.5.2.tgz", + "integrity": "sha512-33QvcNFh+Gv+C2dP9Y9xWEzMgf3JzrpL2nW9PopidiohS1nDcyknKRx2DWaFvyVTTYIkkABVSr073VTj/NITNA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@svgr/webpack": "^8.1.0", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "jiti": "^1.20.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "prompts": "^2.4.2", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.5.2.tgz", + "integrity": "sha512-i0AZjHiRgJU6d7faQngIhuHKNrszpL/SHQPgF1zH4H+Ij6E9NBYGy6pkcGWToIv7IVPbs+pQLh1P3whn0gWXVg==", + "dependencies": { + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.5.2.tgz", + "integrity": 
"sha512-m+Foq7augzXqB6HufdS139PFxDC5d5q2QKZy8q0qYYvGdI6nnlNsGH4cIGsgBnV7smz+mopl3g4asbSDvMV0jA==", + "dependencies": { + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "fs-extra": "^11.2.0", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", + "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": 
"sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==" + }, + "node_modules/@mdx-js/mdx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.0.1.tgz", + "integrity": "sha512-eIQ4QTrOWyL3LWEe/bu6Taqzq2HQvHcyTMaOrI95P2/LmJE7AsfPfgJGuFLPVqBUE1BC1rik3VIhU+s9u72arA==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-to-js": "^2.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-estree": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "periscopic": "^3.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.1.tgz", + "integrity": "sha512-9ZrPIU4MGf6et1m1ov3zKf+q9+deetI51zprKB1D/z3NOb+rUxxtEl3mCjW5wTGh6VhRdwPueh1oRzi6ezkA8A==", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + 
"integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz", + "integrity": "sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.25", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.25.tgz", + "integrity": "sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==" + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", + "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", + "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", + "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", + "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", + "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", + "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", + "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", + "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", + "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", + "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", + "@svgr/babel-plugin-transform-svg-component": "8.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", + "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "camelcase": "^6.2.0", + "cosmiconfig": "^8.1.3", + "snake-case": "^3.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", + "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", + "dependencies": { + "@babel/types": "^7.21.3", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", + "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "@svgr/hast-util-to-babel-ast": "8.0.0", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", + "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", + "dependencies": { + "cosmiconfig": "^8.1.3", + "deepmerge": "^4.3.1", + "svgo": "^3.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + 
"peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", + "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", + "dependencies": { + "@babel/core": "^7.21.3", + "@babel/plugin-transform-react-constant-elements": "^7.21.3", + "@babel/preset-env": "^7.20.2", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.21.0", + "@svgr/core": "8.1.0", + "@svgr/plugin-jsx": "8.1.0", + "@svgr/plugin-svgo": "8.1.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/acorn": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", + "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": 
"sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.5", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.5.tgz", + "integrity": "sha512-y6W03tvrACO72aijJ5uF02FRq5cgDR9lUxddQ8vyF+GvmjJQqbzDcJngEjURc+ZsG31VI3hODNZJ2URj86pzmg==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==" + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.14", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.14.tgz", + "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==" + }, + 
"node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==" + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + }, + "node_modules/@types/node": { + "version": "20.14.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.7.tgz", + "integrity": "sha512-uTr2m2IbJJucF3KUxgnGOZvYbN0QgkGyWxG6973HCpMYFy2KfcgYuIwkJQMQkt1VbBMlvWRbpshFTLxnxCZjKQ==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-forge": { + "version": "1.3.11", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz", + "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" + }, + "node_modules/@types/prismjs": { + "version": "1.26.4", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.4.tgz", + "integrity": "sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" + }, + "node_modules/@types/qs": { + "version": "6.9.15", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.15.tgz", + "integrity": 
"sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==" + }, + "node_modules/@types/react": { + "version": "18.3.3", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", + "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-config": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": 
"sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" + }, + "node_modules/@types/ws": { + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz", + "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz", + "integrity": "sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": 
"sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz", + "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.12.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz", + "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-opt": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1", + "@webassemblyjs/wast-printer": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz", + "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz", + "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-buffer": "1.12.1", + "@webassemblyjs/wasm-gen": "1.12.1", + "@webassemblyjs/wasm-parser": "1.12.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz", + "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + 
"@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz", + "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==", + "dependencies": { + "@webassemblyjs/ast": "1.12.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.0.tgz", + "integrity": "sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.3", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.3.tgz", + "integrity": "sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "engines": { + "node": 
">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.16.0.tgz", + "integrity": "sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.24.0.tgz", + "integrity": "sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-account": "4.24.0", + "@algolia/client-analytics": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-personalization": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/recommend": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.22.5", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz", + "integrity": "sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": 
"sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": 
"https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.20", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", + "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": "^4.23.3", + "caniuse-lite": "^1.0.30001646", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/babel-loader": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", + "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": 
"sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", + "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.1", + "core-js-compat": "^3.36.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": 
"2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.2.1.tgz", + "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" + }, + "bin": { + 
"browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001662", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001662.tgz", + "integrity": 
"sha512-sgMUVwLmGseH8ZIrm1d51UbrhqMCH3jvS7gF/M6byuHOnKyLOBL7W8yz5V02OHwgLGA36o/AFhWzzh4uc5aqTA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + 
"version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + "node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": 
"sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "dependencies": { + "dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-text-to-clipboard": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": 
"sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.37.1.tgz", + "integrity": "sha512-Xn6qmxrQZyB0FFY8E3bgRXei3lWDJHhvI+u0q9TKIYM49G8pAr0FgnnrFRAmsbptZL1yxRADVXn+x5AGsbBfyw==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.1.tgz", + "integrity": "sha512-9TNiImhKvQqSUkOvk/mMRZzOANTiEVC7WaBNhHcKM7x+/5E1l5NvsysR19zuDQScE8k+kfQXWRN3AtS/eOSHpg==", + "dependencies": { + "browserslist": "^4.23.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.1.tgz", + "integrity": "sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-declaration-sorter": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", + "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", + "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.33", + "postcss-modules-extract-imports": "^3.1.0", + "postcss-modules-local-by-default": "^4.0.5", + "postcss-modules-scope": "^3.2.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "cssnano": "^6.0.1", + "jest-worker": "^29.4.3", + "postcss": "^8.4.24", + "schema-utils": "^4.0.1", + "serialize-javascript": "^6.0.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + "node_modules/css-select": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", + "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + 
"node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", + "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", + "dependencies": { + "cssnano-preset-default": "^6.1.2", + "lilconfig": "^3.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", + "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", + "dependencies": { + "autoprefixer": "^10.4.19", + "browserslist": "^4.23.0", + "cssnano-preset-default": "^6.1.2", + "postcss-discard-unused": "^6.0.5", + "postcss-merge-idents": "^6.0.3", + "postcss-reduce-idents": "^6.0.3", + "postcss-zindex": "^6.0.2" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-default": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", + "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", + "dependencies": { + "browserslist": "^4.23.0", + "css-declaration-sorter": "^7.2.0", + "cssnano-utils": "^4.0.2", + "postcss-calc": "^9.0.1", + "postcss-colormin": "^6.1.0", + "postcss-convert-values": "^6.1.0", + "postcss-discard-comments": "^6.0.2", + "postcss-discard-duplicates": "^6.0.3", + "postcss-discard-empty": "^6.0.3", + "postcss-discard-overridden": "^6.0.2", + "postcss-merge-longhand": "^6.0.5", + "postcss-merge-rules": "^6.1.1", + "postcss-minify-font-values": "^6.1.0", + "postcss-minify-gradients": "^6.0.3", + "postcss-minify-params": "^6.1.0", + "postcss-minify-selectors": "^6.0.4", + "postcss-normalize-charset": "^6.0.2", + "postcss-normalize-display-values": "^6.0.2", + "postcss-normalize-positions": "^6.0.2", + "postcss-normalize-repeat-style": "^6.0.2", + "postcss-normalize-string": "^6.0.2", + "postcss-normalize-timing-functions": "^6.0.2", + "postcss-normalize-unicode": "^6.1.0", + "postcss-normalize-url": "^6.0.2", + "postcss-normalize-whitespace": "^6.0.2", + "postcss-ordered-values": "^6.0.2", + "postcss-reduce-initial": "^6.1.0", + "postcss-reduce-transforms": "^6.0.2", + "postcss-svgo": "^6.0.3", + "postcss-unique-selectors": "^6.0.4" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-utils": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", + 
"integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/csso": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", + "dependencies": { + "css-tree": "~2.2.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==" + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + }, + "node_modules/debug": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": 
"https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + 
"node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", + "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": 
"sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.26", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.26.tgz", + "integrity": "sha512-Z+OMe9M/V6Ep9n/52+b7lkvYEps26z4Yz3vjWL1V61W0q+VLF1pOHhMY17sa4roz4AWmULSI8E6SAojZA5L0YQ==" + }, + "node_modules/emoji-regex": { + 
"version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.17.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", + "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.3.tgz", + "integrity": "sha512-i1gCgmR9dCl6Vil6UKPI/trA69s08g/syhiDK9TG0Nf1RJjjFI+AzoWW7sPufzkgYAn861skuCwJa0pIIHYxvg==" + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + 
"engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + 
"estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz", + "integrity": "sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": 
"https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.0.tgz", + "integrity": "sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.6.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.10", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + 
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "dependencies": { + "punycode": "^1.3.2" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": 
"sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": { + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": 
"sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + 
"version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", + "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": 
"sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/global-dirs/node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", 
+ "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + 
"version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", + "integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^8.0.0", + "property-information": "^6.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.4.tgz", + "integrity": 
"sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", + "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", + "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==" + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz", + "integrity": "sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": 
"^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", + "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.5.2.tgz", + "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", + "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + 
"node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + 
"peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", + "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", + "dependencies": { + "queue": "6.0.2" + }, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=16.x" + } + }, + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": 
"sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.44", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.44.tgz", + "integrity": "sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", + "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + 
"version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.14.0.tgz", + "integrity": "sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + 
}, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + 
"version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.6", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", + "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": 
"https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.8.0.tgz", + "integrity": "sha512-vJranOAJrI/llyWGRQqiDM+adrw+k83fvmmx3+nV47g3+36xM15jE+zyZ6Ffel02+xSvuM0b2GDRosXZkbb6wA==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", + "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + 
}, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", + "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + 
"node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", + "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz", + "integrity": "sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + 
"node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", + "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + 
"dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-frontmatter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", + "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + 
"micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", + "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz", + "integrity": "sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz", + "integrity": "sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==", + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + 
], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", + "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-label": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", + "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz", + "integrity": "sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": 
"sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-space/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-title": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", + "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", + "integrity": 
"sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", + "integrity": 
"sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", + "integrity": "sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", + "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", + "integrity": 
"sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz", + "integrity": "sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz", + "integrity": "sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==", + "funding": [ + { + "type": "GitHub Sponsors", + 
"url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", + "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", + "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", + "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + 
"dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz", + "integrity": "sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/mini-css-extract-plugin": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.0.tgz", + "integrity": "sha512-Zs1YsZVfemekSZG+44vBsYTLQORkPMwnlv+aehcxK/NLKC+EGhDB39/YePYYqx/sTk6NnYpuqikhSn7+JIevTA==", + "dependencies": { + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mrmime": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", + "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", 
+ "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.1.3.tgz", + "integrity": "sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", + "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "engines": { + 
"node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + 
"integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } 
+ }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", + "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", + "dependencies": { + "domhandler": "^5.0.2", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": 
"sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/periscopic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", + "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^3.0.0", + "is-reference": "^3.0.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", + "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", + "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "colord": "^2.9.3", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-convert-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", + "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-comments": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", + "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "6.0.3", + "resolved": 
"https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", + "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-empty": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", + "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", + "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-unused": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", + "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", + "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", + "dependencies": { + "cosmiconfig": "^8.3.5", + "jiti": "^1.20.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-merge-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", + "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", + "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^6.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-rules": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", + "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^4.0.2", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || 
>=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", + "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", + "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", + "dependencies": { + "colord": "^2.9.3", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-params": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", + "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", + "dependencies": { + "browserslist": "^4.23.0", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", + "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", + "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.5.tgz", + "integrity": "sha512-6MieY7sIfTK0hYfafw1OMEG+2bg8Q1ocHCpoWLqOKj3JXlKu4G7btkmM/B7lFubYkYWmRSPLZi5chid63ZaZYw==", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.0.tgz", + "integrity": "sha512-oq+g1ssrsZOsx9M96c5w8laRmvEu9C3adDSjI8oTcbfkrTE8hx/zfyobUoWIxaKPO8bt6S62kxpw5GqypEw1QQ==", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", + "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", + "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", + "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", + "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-string": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", + "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", + "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", + "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + 
"peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-url": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", + "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", + "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-ordered-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", + "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", + "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", + "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", + "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.0.tgz", + "integrity": "sha512-UMz42UD0UY0EApS0ZL9o1XnLhSTtvvvLe5Dc2H2O56fvRZi+KulDyf5ctDhhtYJBGKStV2FL1fy6253cmLgqVQ==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", + "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", + "dependencies": { + 
"sort-css-media-queries": "2.2.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.23" + } + }, + "node_modules/postcss-svgo": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", + "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^3.2.0" + }, + "engines": { + "node": "^14 || ^16 || >= 18" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", + "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/postcss-zindex": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", + "integrity": "sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", + "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", + "dependencies": { + "@types/prismjs": "^1.26.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.0.0" + } + }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": 
"^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "node_modules/pupa": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", + "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "5.1.1", + 
"resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": "^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", 
+ "text-table": "^0.2.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/react-dev-utils/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/loader-utils": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.3.1.tgz", + "integrity": "sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==", + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/react-dev-utils/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/react-dev-utils/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "node_modules/react-fast-compare": { + 
"version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-json-view-lite": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz", + "integrity": "sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.13.1 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", + "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", + "dependencies": { + "@types/react": "*" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": 
"sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reading-time": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", + "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" + }, + "node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "dependencies": { + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": 
"sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", + "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.0.tgz", + "integrity": "sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", + "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", + "dependencies": { + "@types/mdast": "^4.0.2", + "emoticon": "^4.0.1", + "mdast-util-find-and-replace": "^3.0.1", + "node-emoji": "^2.1.0", + "unified": "^11.0.4" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + 
"node_modules/remark-frontmatter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", + "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", + "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.0.1.tgz", + "integrity": "sha512-3Pz3yPQ5Rht2pM5R+0J2MrGoBSrzf+tJG94N+t/ilfdh8YLyyKYtidAYwTveB20BoHAcwIopOUqhcmh2F7hGYA==", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.1.tgz", + "integrity": "sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": 
"https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/resolve": { + "version": 
"1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rtl-detect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz", + "integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==" + }, + "node_modules/rtlcss": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + 
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sax": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/search-insights": { + "version": "2.17.2", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.2.tgz", + "integrity": "sha512-zFNpOpUO+tY2D85KrxJ+aqwnIfdEGi06UH2+xEb+Bp9Mwznmauqc9djbnBibJO5mpfUPPa8st6Sx65+vbeO45g==", + "peer": true + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "node_modules/selfsigned": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", + "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", + "dependencies": { + "@types/node-forge": "^1.3.0", + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.6.2", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + } + }, + 
"node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": 
"sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + 
"dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/sitemap": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/snake-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", + "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", + "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": 
"sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, 
+ "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-object": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", + "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/stylehacks": { + "version": "6.1.1", + "resolved": 
"https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", + "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "node_modules/svgo": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", + "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^5.1.0", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.0.0" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.31.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.31.1.tgz", + "integrity": "sha512-37upzU1+viGvuFtBo9NPufCb9dwM0+l9hMxYyWfBA+fbwrPqNJAhbZ6W47bBFnZHKHTUBnMvi87434qq+qnxOg==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.20", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.26.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": 
"sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": 
"https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.2.tgz", + "integrity": "sha512-NcRtPEOsPFFWjobJEtfihkLCZCXZt/os3zf8nTxjVH3RvTSxjrCamJpbExGvYOF+tFHc3pA65qpdwPbzjohhew==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": 
"https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + "node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + 
"version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + 
"node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "node_modules/utility-types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", + "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/watchpack": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.1.tgz", + "integrity": "sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + 
"node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.94.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", + "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", + "dependencies": { + "@types/estree": "^1.0.5", + "@webassemblyjs/ast": "^1.12.1", + "@webassemblyjs/wasm-edit": "^1.12.1", + "@webassemblyjs/wasm-parser": "^1.12.1", + "acorn": "^8.7.1", + "acorn-import-attributes": "^1.9.5", + "browserslist": "^4.21.10", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.1", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.10", + "watchpack": "^2.4.1", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.2", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + 
} + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.2", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.4", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": 
"https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/webpack/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": 
"https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + 
"typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + "xml-js": "bin/cli.js" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/docs_v2/package.json b/docs_v2/package.json new file mode 100644 index 00000000..64b6fe3b --- /dev/null +++ b/docs_v2/package.json @@ -0,0 +1,44 @@ +{ + "name": "langchain-dart", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids" + }, + "dependencies": { + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/types": "^3.5.2" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 3 chrome version", + "last 3 firefox version", + "last 5 safari version" + ] + }, + "engines": { + "node": ">=18.0" 
+ } +} diff --git a/docs_v2/sidebars.js b/docs_v2/sidebars.js new file mode 100644 index 00000000..72e4f826 --- /dev/null +++ b/docs_v2/sidebars.js @@ -0,0 +1,30 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + integrations: [{type: 'autogenerated', dirName: '.'}] + + // tutorialSidebar: [ + // 'intro', + // 'tutorials/index', + // 'how_to/index', + // 'concepts', + // 'integrations/index', + // ], + +}; + +export default sidebars; \ No newline at end of file diff --git a/docs_v2/src/components/HomepageFeatures/index.js b/docs_v2/src/components/HomepageFeatures/index.js new file mode 100644 index 00000000..acc76219 --- /dev/null +++ b/docs_v2/src/components/HomepageFeatures/index.js @@ -0,0 +1,64 @@ +import clsx from 'clsx'; +import Heading from '@theme/Heading'; +import styles from './styles.module.css'; + +const FeatureList = [ + { + title: 'Easy to Use', + Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default, + description: ( + <> + Docusaurus was designed from the ground up to be easily installed and + used to get your website up and running quickly. + + ), + }, + { + title: 'Focus on What Matters', + Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default, + description: ( + <> + Docusaurus lets you focus on your docs, and we'll do the chores. Go + ahead and move your docs into the docs directory. + + ), + }, + { + title: 'Powered by React', + Svg: require('@site/static/img/undraw_docusaurus_react.svg').default, + description: ( + <> + Extend or customize your website layout by reusing React. Docusaurus can + be extended while reusing the same header and footer. + + ), + }, +]; + +function Feature({Svg, title, description}) { + return ( +
+    <div className={clsx('col col--4')}>
+      <div className="text--center">
+        <Svg className={styles.featureSvg} role="img" />
+      </div>
+      <div className="text--center padding-horiz--md">
+        <Heading as="h3">{title}</Heading>
+        <p>{description}</p>
+      </div>
+    </div>
+  );
+}
+
+export default function HomepageFeatures() {
+  return (
+    <section className={styles.features}>
+      <div className="container">
+        <div className="row">
+          {FeatureList.map((props, idx) => (
+            <Feature key={idx} {...props} />
+          ))}
+        </div>
+      </div>
+    </section>
+ ); +} diff --git a/docs_v2/src/components/HomepageFeatures/styles.module.css b/docs_v2/src/components/HomepageFeatures/styles.module.css new file mode 100644 index 00000000..b248eb2e --- /dev/null +++ b/docs_v2/src/components/HomepageFeatures/styles.module.css @@ -0,0 +1,11 @@ +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; +} + +.featureSvg { + height: 200px; + width: 200px; +} diff --git a/docs_v2/src/css/custom.css b/docs_v2/src/css/custom.css new file mode 100644 index 00000000..2bc6a4cf --- /dev/null +++ b/docs_v2/src/css/custom.css @@ -0,0 +1,30 @@ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary: #2e8555; + --ifm-color-primary-dark: #29784c; + --ifm-color-primary-darker: #277148; + --ifm-color-primary-darkest: #205d3b; + --ifm-color-primary-light: #33925d; + --ifm-color-primary-lighter: #359962; + --ifm-color-primary-lightest: #3cad6e; + --ifm-code-font-size: 95%; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); +} + +/* For readability concerns, you should choose a lighter palette in dark mode. */ +[data-theme='dark'] { + --ifm-color-primary: #25c2a0; + --ifm-color-primary-dark: #21af90; + --ifm-color-primary-darker: #1fa588; + --ifm-color-primary-darkest: #1a8870; + --ifm-color-primary-light: #29d5b0; + --ifm-color-primary-lighter: #32d8b4; + --ifm-color-primary-lightest: #4fddbf; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); +} diff --git a/docs_v2/src/pages/index.js b/docs_v2/src/pages/index.js new file mode 100644 index 00000000..176f838c --- /dev/null +++ b/docs_v2/src/pages/index.js @@ -0,0 +1,7 @@ +import { Redirect } from "@docusaurus/router"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import React from "react"; + +export default function Home() { + return ; +} diff --git a/docs_v2/src/pages/index.module.css b/docs_v2/src/pages/index.module.css new file mode 100644 index 00000000..9f71a5da --- /dev/null +++ b/docs_v2/src/pages/index.module.css @@ -0,0 +1,23 @@ +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} diff --git a/docs_v2/src/pages/markdown-page.md b/docs_v2/src/pages/markdown-page.md new file mode 100644 index 00000000..9756c5b6 --- /dev/null +++ b/docs_v2/src/pages/markdown-page.md @@ -0,0 +1,7 @@ +--- +title: Markdown page example +--- + +# Markdown page example + +You don't need React to write simple standalone pages. 
diff --git a/docs_v2/static/.nojekyll b/docs_v2/static/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/docs_v2/static/img/favicon.ico b/docs_v2/static/img/favicon.ico new file mode 100644 index 00000000..4c296111 Binary files /dev/null and b/docs_v2/static/img/favicon.ico differ diff --git a/docs_v2/static/img/langchain.dart.png b/docs_v2/static/img/langchain.dart.png new file mode 100644 index 00000000..f7fdb39f Binary files /dev/null and b/docs_v2/static/img/langchain.dart.png differ diff --git a/docs_v2/static/img/logo.svg b/docs_v2/static/img/logo.svg new file mode 100644 index 00000000..9db6d0d0 --- /dev/null +++ b/docs_v2/static/img/logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/examples/browser_summarizer/lib/chrome/chrome_api.dart b/examples/browser_summarizer/lib/chrome/chrome_api.dart index d60ac8b7..9ab8b8b4 100644 --- a/examples/browser_summarizer/lib/chrome/chrome_api.dart +++ b/examples/browser_summarizer/lib/chrome/chrome_api.dart @@ -1,6 +1,6 @@ // ignore_for_file: public_member_api_docs @JS('chrome') -library chrome; +library; import 'package:js/js.dart'; diff --git a/examples/browser_summarizer/pubspec.lock b/examples/browser_summarizer/pubspec.lock index 5050c14b..c6a87ff2 100644 --- a/examples/browser_summarizer/pubspec.lock +++ b/examples/browser_summarizer/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" + sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" url: "https://pub.dev" source: hosted - version: "8.1.2" + version: "8.1.4" characters: dependency: transitive description: @@ -53,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -93,26 +93,26 @@ packages: dependency: transitive description: name: fetch_api - sha256: "77f3be8c9acaa64ed37dd49c21c056da71b78053d31131ca26a273884a753f66" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "2.0.0-wasm" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: e11722d7d8cd21f944b52af780392274f7c34a41156b1c80053fc2a414e09a1b + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.1.0-wasm" + version: "1.1.2" ffi: dependency: transitive description: name: ffi - sha256: "7bf0adc28a23d395f19f3f1eb21dd7cfd1dd9f8e1c50051c069122e6853bc878" + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "2.1.0" + version: "2.1.3" file: dependency: transitive description: @@ -129,6 +129,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: 
"380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" flutter: dependency: "direct main" description: flutter @@ -138,18 +146,18 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 + sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a url: "https://pub.dev" source: hosted - version: "8.1.5" + version: "8.1.6" flutter_markdown: dependency: "direct main" description: name: flutter_markdown - sha256: "87e11b9df25a42e2db315b8b7a51fae8e66f57a4b2f50ec4b822d0fa155e6b52" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.22" + version: "0.7.3+1" flutter_web_plugins: dependency: transitive description: flutter @@ -159,10 +167,10 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" html: dependency: transitive description: @@ -175,10 +183,10 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -207,46 +215,46 @@ packages: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" json_path: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.6" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -259,10 +267,10 @@ packages: dependency: transitive description: name: markdown - sha256: acf35edccc0463a9d7384e437c015a3535772e09714cf60e07eeef3a15870dcd + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 url: "https://pub.dev" source: hosted - version: "7.1.1" + version: "7.2.2" material_color_utilities: dependency: transitive description: @@ -275,10 +283,10 @@ packages: dependency: transitive description: name: math_expressions - sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: 
hosted - version: "2.4.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -291,10 +299,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" nested: dependency: transitive description: @@ -303,13 +311,21 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.4.2" path: dependency: transitive description: @@ -338,10 +354,10 @@ packages: dependency: transitive description: name: path_provider_windows - sha256: "8bc9f22eee8690981c22aa7fc602f5c85b497a6fb2ceb35ee5a5e5ed85ad8170" + sha256: bd6f00dbd873bfb70d0761682da2b3a2c2fccc2b9e84c495821639601d81afe7 url: "https://pub.dev" source: hosted - version: "2.2.1" + version: "2.3.0" petitparser: dependency: transitive description: @@ -354,10 +370,10 @@ packages: dependency: transitive description: name: platform - sha256: "12220bb4b65720483f8fa9450b4332347737cf8213dd2840d8b2c823e47243ec" + sha256: "9b71283fc13df574056616011fb138fd3b793ea47cc509c189a6c3fa5f8a1a65" url: "https://pub.dev" source: hosted - version: "3.1.4" + version: "3.1.5" plugin_platform_interface: dependency: transitive description: @@ -370,10 +386,10 @@ packages: dependency: transitive description: name: provider - sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c url: "https://pub.dev" source: hosted - version: "6.1.1" + version: "6.1.2" rfc_6901: dependency: transitive description: @@ -386,66 +402,66 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" shared_preferences: dependency: "direct main" description: name: shared_preferences - sha256: "81429e4481e1ccfb51ede496e916348668fd0921627779233bd24cc3ff6abd02" + sha256: c272f9cabca5a81adc9b0894381e9c1def363e980f960fa903c604c471b22f68 url: "https://pub.dev" source: hosted - version: "2.2.2" + version: "2.3.1" shared_preferences_android: dependency: transitive description: name: shared_preferences_android - sha256: "8568a389334b6e83415b6aae55378e158fbc2314e074983362d20c562780fb06" + sha256: a7e8467e9181cef109f601e3f65765685786c1a738a83d7fbbde377589c0d974 url: "https://pub.dev" source: hosted - version: "2.2.1" + version: "2.3.1" shared_preferences_foundation: dependency: transitive description: name: shared_preferences_foundation - sha256: "7bf53a9f2d007329ee6f3df7268fd498f8373602f943c975598bbb34649b62a7" + sha256: c4b35f6cb8f63c147312c054ce7c2254c8066745125264f0c88739c417fc9d9f url: "https://pub.dev" source: hosted - version: "2.3.4" + version: "2.5.2" shared_preferences_linux: dependency: transitive description: name: shared_preferences_linux - sha256: "9f2cbcf46d4270ea8be39fa156d86379077c8a5228d9dfdb1164ae0bb93f1faa" + sha256: 
"580abfd40f415611503cae30adf626e6656dfb2f0cee8f465ece7b6defb40f2f" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" shared_preferences_platform_interface: dependency: transitive description: name: shared_preferences_platform_interface - sha256: "22e2ecac9419b4246d7c22bfbbda589e3acf5c0351137d87dd2939d984d37c3b" + sha256: "57cbf196c486bc2cf1f02b85784932c6094376284b3ad5779d1b1c6c6a816b80" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" shared_preferences_web: dependency: transitive description: name: shared_preferences_web - sha256: "9aee1089b36bd2aafe06582b7d7817fd317ef05fc30e6ba14bff247d0933042a" + sha256: d2ca4132d3946fec2184261726b355836a82c33d7d5b67af32692aff18a4684e url: "https://pub.dev" source: hosted - version: "2.3.0" + version: "2.4.2" shared_preferences_windows: dependency: transitive description: name: shared_preferences_windows - sha256: "841ad54f3c8381c480d0c9b508b89a34036f512482c407e6df7a9c4aa2ef8f59" + sha256: "94ef0f72b2d71bc3e700e025db3710911bd51a71cefb65cc609dd0d9a982e3c1" url: "https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.1" sky_engine: dependency: transitive description: flutter @@ -471,10 +487,17 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.1.0" term_glyph: dependency: transitive description: @@ -495,10 +518,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -511,18 +534,10 @@ packages: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" - url: "https://pub.dev" - source: hosted - version: "0.5.1" - win32: - dependency: transitive - description: - name: win32 - sha256: "464f5674532865248444b4c3daca12bd9bf2d7c47f759ce2617986e7229494a8" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "5.2.0" + version: "1.0.0" xdg_directories: dependency: transitive description: @@ -532,5 +547,5 @@ packages: source: hosted version: "1.0.4" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/examples/browser_summarizer/pubspec.yaml b/examples/browser_summarizer/pubspec.yaml index 9a2c4936..42a5999e 100644 --- a/examples/browser_summarizer/pubspec.yaml +++ b/examples/browser_summarizer/pubspec.yaml @@ -4,19 +4,19 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 js: ^0.7.1 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 - shared_preferences: ^2.2.2 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 + shared_preferences: ^2.3.0 flutter: uses-material-design: true diff --git 
a/examples/browser_summarizer/pubspec_overrides.yaml b/examples/browser_summarizer/pubspec_overrides.yaml index 3947b2ae..808fbc3a 100644 --- a/examples/browser_summarizer/pubspec_overrides.yaml +++ b/examples/browser_summarizer/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core,langchain_community +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain: path: ../../packages/langchain @@ -10,3 +10,5 @@ dependency_overrides: path: ../../packages/langchain_openai openai_dart: path: ../../packages/openai_dart + tavily_dart: + path: ../../packages/tavily_dart diff --git a/examples/docs_examples/README.md b/examples/docs_examples/README.md index a2dc3095..6ec73e85 100644 --- a/examples/docs_examples/README.md +++ b/examples/docs_examples/README.md @@ -1,3 +1,3 @@ # Docs examples -Examples used in https://langchaindart.com documentation. +Examples used in https://langchaindart.dev documentation. diff --git a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart index f34fab19..21cea3b4 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart @@ -32,7 +32,7 @@ Future _promptTemplateLLM() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835666, // system_fingerprint: fp_3b956da36b // }, @@ -65,7 +65,7 @@ Future _attachingStopSequences() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835734, // system_fingerprint: fp_a450710239 // }, @@ -133,7 +133,7 @@ Future _attachingToolCallInformation() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714835806, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/cookbook/routing.dart b/examples/docs_examples/bin/expression_language/cookbook/routing.dart index d177611d..1f2232a0 100644 --- a/examples/docs_examples/bin/expression_language/cookbook/routing.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/routing.dart @@ -9,7 +9,7 @@ void main(final List arguments) async { Future _runnableRouter() async { final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final classificationChain = PromptTemplate.fromTemplate(''' @@ -114,7 +114,7 @@ Here is a question: '''; final embeddings = OllamaEmbeddings( - model: 'llama3', + model: 'llama3.2', ); final promptTemplates = [physicsTemplate, historyTemplate]; final promptEmbeddings = await embeddings.embedDocuments( @@ -132,7 +132,7 @@ Here is a question: return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]); }) | ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ) | const StringOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart index 7af0bb43..d6b8cdae 100644 --- 
a/examples/docs_examples/bin/expression_language/cookbook/streaming.dart +++ b/examples/docs_examples/bin/expression_language/cookbook/streaming.dart @@ -33,7 +33,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -49,7 +49,7 @@ Future _languageModels() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714143945, // system_fingerprint: fp_3b956da36b // }, @@ -79,9 +79,7 @@ Future _inputStreams() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); @@ -125,9 +123,7 @@ Future _inputStreamMapper() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/fallbacks.dart b/examples/docs_examples/bin/expression_language/fallbacks.dart new file mode 100644 index 00000000..8eea7bb2 --- /dev/null +++ b/examples/docs_examples/bin/expression_language/fallbacks.dart @@ -0,0 +1,181 @@ +// ignore_for_file: avoid_print +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _modelWithFallbacks(); + await _modelWithMultipleFallbacks(); + await _chainWithFallbacks(); +} + +Future _modelWithFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), + ); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]); + + final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + + final res = await modelWithFallbacks.invoke(prompt); + print(res); +/* +{ + "ChatResult": { + "id": "chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA", + "output": { + "AIChatMessage": { + "content": "The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. 
This scattering process is known as Rayleigh scattering.", + "toolCalls": [] + } + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721542696, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "LanguageModelUsage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 36, + "responseBillableCharacters": null, + "totalTokens": 52 + } + }, + "streaming": false + } +} +*/ +} + +Future _modelWithMultipleFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel1 = + ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'tomato')); + + final fakeOpenAIModel2 = + ChatOpenAI(defaultOptions: const ChatOpenAIOptions(model: 'potato')); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final modelWithFallbacks = + fakeOpenAIModel1.withFallbacks([fakeOpenAIModel2, latestModel]); + + final prompt = PromptValue.string('Explain why sky is blue in 2 lines'); + + final res = await modelWithFallbacks.invoke(prompt); + print(res); + /* + { + "id": "chatcmpl-9nLKW345nrh0nzmw18iO35XnoQ2jo", + "output": { + "content": "The sky appears blue due to Rayleigh scattering, where shorter blue wavelengths of sunlight are scattered more than other colors by the molecules in Earth's atmosphere. This scattering disperses blue light in all directions, making the sky look blue.", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721547092, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 16, + "promptBillableCharacters": null, + "responseTokens": 45, + "responseBillableCharacters": null, + "totalTokens": 61 + }, + "streaming": false +} +*/ +} + +Future _chainWithFallbacks() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + + final fakeOpenAIModel = ChatOpenAI( + defaultOptions: const ChatOpenAIOptions(model: 'tomato'), + ); + + final latestModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + + final badChain = promptTemplate.pipe(fakeOpenAIModel); + final goodChain = promptTemplate.pipe(latestModel); + + final chainWithFallbacks = badChain.withFallbacks([goodChain]); + + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + print(res); +/* +[ + { + "id": "chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI", + "output": { + "content": "Sure! How about this one?\n\nWhy did the bear bring a suitcase to the forest?\n\nBecause it wanted to pack a lunch! 
🐻🌲", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_400f27fa1f" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 31, + "responseBillableCharacters": null, + "totalTokens": 44 + }, + "streaming": false + }, + { + "id": "chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh", + "output": { + "content": "Sure, here's a cat joke for you:\n\nWhy was the cat sitting on the computer?\n\nBecause it wanted to keep an eye on the mouse!", + "toolCalls": [] + }, + "finishReason": "FinishReason.stop", + "metadata": { + "model": "gpt-4o-2024-05-13", + "created": 1721545052, + "system_fingerprint": "fp_c4e5b6fa31" + }, + "usage": { + "promptTokens": 13, + "promptBillableCharacters": null, + "responseTokens": 29, + "responseBillableCharacters": null, + "totalTokens": 42 + }, + "streaming": false + } +] +*/ +} diff --git a/examples/docs_examples/bin/expression_language/get_started.dart b/examples/docs_examples/bin/expression_language/get_started.dart index 5ccc2505..c3ecbd1f 100644 --- a/examples/docs_examples/bin/expression_language/get_started.dart +++ b/examples/docs_examples/bin/expression_language/get_started.dart @@ -82,7 +82,7 @@ Future _promptModelOutputParser() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714327251, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/interface.dart b/examples/docs_examples/bin/expression_language/interface.dart index f678f18a..f2a634b7 100644 --- a/examples/docs_examples/bin/expression_language/interface.dart +++ b/examples/docs_examples/bin/expression_language/interface.dart @@ -96,7 +96,7 @@ Future _runnableInterfaceBatchOptions() async { {'topic': 'cats'}, ], options: [ - const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5), + const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5), const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7), ], ); diff --git a/examples/docs_examples/bin/expression_language/primitives/binding.dart b/examples/docs_examples/bin/expression_language/primitives/binding.dart index 1c456ef7..d16d81d8 100644 --- a/examples/docs_examples/bin/expression_language/primitives/binding.dart +++ b/examples/docs_examples/bin/expression_language/primitives/binding.dart @@ -63,7 +63,7 @@ Future _differentModels() async { chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4-turbo')) | outputParser, 'q2': prompt2 | - chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | + chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, }); final res = await chain.invoke({'name': 'David'}); diff --git a/examples/docs_examples/bin/expression_language/primitives/function.dart b/examples/docs_examples/bin/expression_language/primitives/function.dart index 8c631877..029322bb 100644 --- a/examples/docs_examples/bin/expression_language/primitives/function.dart +++ b/examples/docs_examples/bin/expression_language/primitives/function.dart @@ -73,7 +73,7 @@ Future _function() async { // }, // finishReason: FinishReason.stop, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463309, // system_fingerprint: fp_3b956da36b // }, @@ -116,7 +116,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // 
created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -135,7 +135,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, @@ -154,7 +154,7 @@ Future _function() async { // }, // finishReason: FinishReason.unspecified, // metadata: { - // model: gpt-3.5-turbo-0125, + // model: gpt-4o-mini, // created: 1714463766, // system_fingerprint: fp_3b956da36b // }, diff --git a/examples/docs_examples/bin/expression_language/primitives/mapper.dart b/examples/docs_examples/bin/expression_language/primitives/mapper.dart index 818ed0d7..c9d0400a 100644 --- a/examples/docs_examples/bin/expression_language/primitives/mapper.dart +++ b/examples/docs_examples/bin/expression_language/primitives/mapper.dart @@ -63,9 +63,7 @@ Future _mapInputStream() async { final model = ChatOpenAI( apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); diff --git a/examples/docs_examples/bin/expression_language/primitives/retry.dart b/examples/docs_examples/bin/expression_language/primitives/retry.dart new file mode 100644 index 00000000..917ac501 --- /dev/null +++ b/examples/docs_examples/bin/expression_language/primitives/retry.dart @@ -0,0 +1,177 @@ +// ignore_for_file: avoid_print +import 'dart:io'; +import 'package:langchain/langchain.dart'; +import 'package:langchain_openai/langchain_openai.dart'; + +void main() async { + await _modelWithRetry(); + await _chainWithRetry(); + await _withRetryOptions(); + await _withDelayDurations(); +} + +Future _modelWithRetry() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final model = ChatOpenAI(apiKey: openaiApiKey); + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + + final modelWithRetry = model.withRetry(); + final res = await modelWithRetry.invoke(input); + print(res); + /* + ChatResult{ + id: chatcmpl-9zmFYnu19Pd6ss3zVFHlKN71DILtx, + output: AIChatMessage{ + content: The sky appears blue due to Rayleigh scattering, where shorter wavelengths of sunlight (blue light) are scattered more than longer wavelengths (red light) by the molecules in the Earth's atmosphere. This scattering effect is most prominent when the sun is high in the sky., + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-mini-2024-07-18, created: 1724510508, system_fingerprint: fp_48196bc67a}, + usage: LanguageModelUsage{ + promptTokens: 16, + promptBillableCharacters: null, + responseTokens: 52, + responseBillableCharacters: null, + totalTokens: 68} +, + streaming: false +} +*/ +} + +Future _chainWithRetry() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'), + ); + final chain = promptTemplate.pipe(model).withRetry(); + + final res = await chain.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + print(res); + /* + [ChatResult{ + id: chatcmpl-9zmjiMfHP2WP3PhM6YXdoHXS02ZAm, + output: AIChatMessage{ + content: Sure, here's a bear-themed joke for you: + +Why did the bear refuse to play cards? 
+ +Because he was afraid he might get spotted—he couldn’t bear the tension! 🐻♠️, + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_3aa7262c27}, + usage: LanguageModelUsage{ + promptTokens: 13, + promptBillableCharacters: null, + responseTokens: 41, + responseBillableCharacters: null, + totalTokens: 54} +, + streaming: false +}, ChatResult{ + id: chatcmpl-9zmji1gxCZ4yR3UtX7Af4TBrRhPP1, + output: AIChatMessage{ + content: Sure, here's one for you: + +Why did the cat sit on the computer? + +Because it wanted to keep an eye on the mouse! 🐱🖱️, + toolCalls: [], +}, + finishReason: FinishReason.stop, + metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_c9aa9c0491}, + usage: LanguageModelUsage{ + promptTokens: 13, + promptBillableCharacters: null, + responseTokens: 34, + responseBillableCharacters: null, + totalTokens: 47} +, + streaming: false +}] +*/ +} + +Future _withRetryOptions() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), + ); + final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: true, + ); + final res = await modelWithRetry.invoke(input); + print(res); + /* + retry attempt 0 with delay duration 0:00:01.082000 + retry attempt 1 with delay duration 0:00:02.073000 + retry attempt 2 with delay duration 0:00:04.074000 + Unhandled exception: + Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ + "uri": "https://api.openai.com/v1/chat/completions", + "method": "POST", + "code": 404, + "message": "Unsuccessful response", + "body": { + "error": { + "message": "The model `fake-model` does not exist or you do not have access to it.", + "type": "invalid_request_error", + "param": null, + "code": "model_not_found" + } + } +})*/ +} + +Future _withDelayDurations() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + final input = PromptValue.string('Explain why sky is blue in 2 lines'); + final model = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions(model: 'fake-model'), + ); + final modelWithRetry = model.withRetry( + maxRetries: 3, + addJitter: false, + delayDurations: const [ + Duration(seconds: 1), + Duration(seconds: 2), + Duration(seconds: 3), + ], + ); + final res = await modelWithRetry.invoke(input); + print(res); + // retried with delays provided in RetryOptions + /* +retry attempt 0 with delay duration 0:00:01.000000 +retry attempt 1 with delay duration 0:00:02.000000 +retry attempt 2 with delay duration 0:00:03.000000 +Unhandled exception: +Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({ + "uri": "https://api.openai.com/v1/chat/completions", + "method": "POST", + "code": 401, + "message": "Unsuccessful response", + "body": { + "error": { + "message": "You didn't provide an API key. You need to provide your API key in an Authorization header using Bearer auth (i.e. Authorization: Bearer YOUR_KEY), or as the password field (with blank username) if you're accessing the API from your browser and are prompted for a username and password. 
You can obtain an API key from https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": null, + "code": null + } + } +}) +*/ +} diff --git a/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart similarity index 64% rename from examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart rename to examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart index 16d0b44f..b934c0a7 100644 --- a/examples/docs_examples/bin/modules/agents/agent_types/openai_tools_agent.dart +++ b/examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart @@ -3,33 +3,34 @@ import 'dart:io'; import 'package:langchain/langchain.dart'; import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; import 'package:langchain_openai/langchain_openai.dart'; void main() async { - await _openAIToolsAgent(); - await _openAIToolsAgentCustomToolsMemory(); - await _openAIToolsAgentLCEL(); + await _toolsAgent(); + await _toolsAgentCustomToolsMemory(); + await _toolsAgentLCEL(); } -Future _openAIToolsAgent() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', +Future _toolsAgent() async { + final llm = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.2', temperature: 0, ), ); final tool = CalculatorTool(); - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); - final res = await executor.run('What is 40 raised to the 0.43 power? '); - print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' + final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + ); + print(res); + // The result is: 4.885 } -Future _openAIToolsAgentCustomToolsMemory() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - +Future _toolsAgentCustomToolsMemory() async { final tool = Tool.fromFunction( name: 'search', description: 'Tool for searching the web.', @@ -41,7 +42,7 @@ Future _openAIToolsAgentCustomToolsMemory() async { 'description': 'The query to search for', }, 'n': { - 'type': 'number', + 'type': 'integer', 'description': 'The number of results to return', }, }, @@ -51,13 +52,15 @@ Future _openAIToolsAgentCustomToolsMemory() async { getInputFromJson: SearchInput.fromJson, ); - final llm = ChatOpenAI( - apiKey: openaiApiKey, - defaultOptions: const ChatOpenAIOptions(temperature: 0), + final llm = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3-groq-tool-use', + temperature: 0, + ), ); final memory = ConversationBufferMemory(returnMessages: true); - final agent = OpenAIToolsAgent.fromLLMAndTools( + final agent = ToolsAgent.fromLLMAndTools( llm: llm, tools: [tool], memory: memory, @@ -66,7 +69,7 @@ Future _openAIToolsAgentCustomToolsMemory() async { final executor = AgentExecutor(agent: agent); final res1 = await executor.run( - 'Search for cats. Return only 3 results.', + 'Search for cat names. 
Return only 3 results.', ); print(res1); // Here are 3 search results for "cats": @@ -92,11 +95,16 @@ class SearchInput { } String callYourSearchFunction(final SearchInput input) { - return 'Results:\n${List.generate(input.n, (final i) => 'Result ${i + 1}').join('\n')}'; + final n = input.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; } -Future _openAIToolsAgentLCEL() async { - final openaiApiKey = Platform.environment['OPENAI_API_KEY']; +Future _toolsAgentLCEL() async { + final openAiKey = Platform.environment['OPENAI_API_KEY']; final prompt = ChatPromptTemplate.fromTemplates(const [ (ChatMessageType.system, 'You are a helpful assistant'), @@ -107,18 +115,19 @@ Future _openAIToolsAgentLCEL() async { final tool = CalculatorTool(); final model = ChatOpenAI( - apiKey: openaiApiKey, + apiKey: openAiKey, defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o-mini', temperature: 0, tools: [tool], ), ); - const outputParser = OpenAIToolsAgentOutputParser(); + const outputParser = ToolsAgentOutputParser(); List buildScratchpad(final List intermediateSteps) { return intermediateSteps - .map((final s) { + .map((s) { return s.action.messageLog + [ ChatMessage.tool( @@ -127,13 +136,13 @@ Future _openAIToolsAgentLCEL() async { ), ]; }) - .expand((final m) => m) + .expand((m) => m) .toList(growable: false); } final agent = Agent.fromRunnable( Runnable.mapInput( - (final AgentPlanInput planInput) => { + (AgentPlanInput planInput) => { 'input': planInput.inputs['input'], 'agent_scratchpad': buildScratchpad(planInput.intermediateSteps), }, @@ -143,8 +152,9 @@ Future _openAIToolsAgentLCEL() async { final executor = AgentExecutor(agent: agent); final res = await executor.invoke({ - 'input': 'What is 40 raised to the 0.43 power?', + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', }); print(res['output']); - // 40 raised to the power of 0.43 is approximately 4.88524. + // The result of 40 raised to the power of 0.43 is approximately 4.885. } diff --git a/examples/docs_examples/bin/modules/agents/tools/calculator.dart b/examples/docs_examples/bin/modules/agents/tools/calculator.dart index 5d92dc27..acab2d65 100644 --- a/examples/docs_examples/bin/modules/agents/tools/calculator.dart +++ b/examples/docs_examples/bin/modules/agents/tools/calculator.dart @@ -15,7 +15,7 @@ void main() async { ), ); final tool = CalculatorTool(); - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); final executor = AgentExecutor(agent: agent); final res = await executor.run('What is 40 raised to the 0.43 power? 
'); print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart index f62d7bde..7144ea82 100644 --- a/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart +++ b/examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart @@ -18,7 +18,7 @@ void main() async { CalculatorTool(), OpenAIDallETool(apiKey: openAiKey), ]; - final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); + final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); final executor = AgentExecutor(agent: agent); final res = await executor.run( 'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. ' diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart new file mode 100644 index 00000000..45c1cd55 --- /dev/null +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart @@ -0,0 +1,109 @@ +// ignore_for_file: avoid_print +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_anthropic/langchain_anthropic.dart'; + +void main(final List arguments) async { + await _invokeModel(); + await _multiModal(); + await _streaming(); +} + +Future _invokeModel() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + + final chatPrompt = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that translates {input_language} to {output_language}.' + ), + (ChatMessageType.human, 'Text to translate:\n{text}'), + ]); + + final chain = chatPrompt | chatModel | const StringOutputParser(); + + final res = await chain.invoke({ + 'input_language': 'English', + 'output_language': 'French', + 'text': 'I love programming.', + }); + print(res); + // -> 'J'adore programmer.' + + chatModel.close(); +} + +Future _multiModal() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ), + ]), + ); + print(res.output.content); + // -> 'The fruit in the image is an apple.' 
+ + chatModel.close(); +} + +Future _streaming() async { + final apiKey = Platform.environment['ANTHROPIC_API_KEY']; + + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas.', + ), + (ChatMessageType.human, 'List the numbers from 1 to {max_num}'), + ]); + + final chatModel = ChatAnthropic( + apiKey: apiKey, + defaultOptions: const ChatAnthropicOptions( + model: 'claude-3-5-sonnet-20240620', + temperature: 0, + ), + ); + + final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser()); + + final stream = chain.stream({'max_num': '30'}); + await stream.forEach(print); + // 123 + // 456789101 + // 112131415161 + // 718192021222 + // 324252627282 + // 930 + + chatModel.close(); +} diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart index 4e5cf3b5..3473a738 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart @@ -8,8 +8,11 @@ import 'package:langchain_ollama/langchain_ollama.dart'; void main(final List arguments) async { await _chatOllama(); await _chatOllamaStreaming(); - await _chatOllamaJsonMode(); await _chatOllamaMultimodal(); + await _chatOllamaToolCalling(); + await _chatOllamaJsonMode(); + await _extraction(); + await _flights(); await _rag(); } @@ -24,7 +27,7 @@ Future _chatOllama() async { final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.2', temperature: 0, ), ); @@ -51,7 +54,7 @@ Future _chatOllamaStreaming() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.2', temperature: 0, ), ); @@ -66,6 +69,70 @@ Future _chatOllamaStreaming() async { // 9 } +Future _chatOllamaMultimodal() async { + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llava', + temperature: 0, + ), + ); + final prompt = ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + data: base64.encode( + await File('./bin/assets/apple.jpeg').readAsBytes(), + ), + ), + ]), + ); + final res = await chatModel.invoke(PromptValue.chat([prompt])); + print(res.output.content); + // -> 'An Apple' +} + +Future _chatOllamaToolCalling() async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. 
San Francisco, US', + }, + }, + 'required': ['location'], + }, + ); + + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.2', + temperature: 0, + tools: [tool], + ), + ); + + final res = await chatModel.invoke( + PromptValue.string( + 'What’s the weather like in Boston and Madrid right now in celsius?', + ), + ); + print(res.output.toolCalls); + // [AIChatMessageToolCall{ + // id: a621064b-03b3-4ca6-8278-f37504901034, + // name: get_current_weather, + // arguments: {location: Boston, US}, + // }, + // AIChatMessageToolCall{ + // id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53, + // name: get_current_weather, + // arguments: {location: Madrid, ES}, + // }] +} + Future _chatOllamaJsonMode() async { final promptTemplate = ChatPromptTemplate.fromTemplates(const [ ( @@ -76,7 +143,7 @@ Future _chatOllamaJsonMode() async { ]); final chat = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llama3', + model: 'llama3.2', temperature: 0, format: OllamaResponseFormat.json, ), @@ -94,32 +161,194 @@ Future _chatOllamaJsonMode() async { // {Spain: 46735727, The Netherlands: 17398435, France: 65273538} } -Future _chatOllamaMultimodal() async { +Future _extraction() async { + const tool = ToolSpec( + name: 'information_extraction', + description: 'Extracts the relevant information from the passage', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'people': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of a person', + }, + 'height': { + 'type': 'number', + 'description': 'The height of the person in cm', + }, + 'hair_color': { + 'type': 'string', + 'description': 'The hair color of the person', + 'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'], + }, + }, + 'required': ['name', 'height', 'hair_color'], + }, + }, + }, + 'required': ['people'], + }, + ); + + final model = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: 'llama3.2', + temperature: 0, + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: tool.name), + ), + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate(''' +Extract and save the relevant entities mentioned in the following passage together with their properties. + +Passage: +{input}'''); + + final chain = Runnable.getMapFromInput() + .pipe(promptTemplate) + .pipe(model) + .pipe(ToolsOutputParser()); + + final res = await chain.invoke( + 'Alex is 5 feet tall. ' + 'Claudia is 1 foot taller than Alex and jumps higher than him. 
' + 'Claudia has orange hair and Alex is blonde.', + ); + final extractedData = res.first.arguments; + print(extractedData); + // { + // people: [ + // { + // name: Alex, + // height: 152, + // hair_color: blonde + // }, + // { + // name: Claudia, + // height: 183, + // hair_color: orange + // } + // ] + // } +} + +// Simulates an API call to get flight times +// In a real application, this would fetch data from a live database or API +String getFlightTimes(String departure, String arrival) { + final flights = { + 'NYC-LAX': { + 'departure': '08:00 AM', + 'arrival': '11:30 AM', + 'duration': '5h 30m', + }, + 'LAX-NYC': { + 'departure': '02:00 PM', + 'arrival': '10:30 PM', + 'duration': '5h 30m', + }, + 'LHR-JFK': { + 'departure': '10:00 AM', + 'arrival': '01:00 PM', + 'duration': '8h 00m', + }, + 'JFK-LHR': { + 'departure': '09:00 PM', + 'arrival': '09:00 AM', + 'duration': '7h 00m', + }, + 'CDG-DXB': { + 'departure': '11:00 AM', + 'arrival': '08:00 PM', + 'duration': '6h 00m', + }, + 'DXB-CDG': { + 'departure': '03:00 AM', + 'arrival': '07:30 AM', + 'duration': '7h 30m', + }, + }; + + final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}'; + return jsonEncode(flights[key] ?? {'error': 'Flight not found'}); +} + +Future _flights() async { + const getFlightTimesTool = ToolSpec( + name: 'get_flight_times', + description: 'Get the flight times between two cities', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'departure': { + 'type': 'string', + 'description': 'The departure city (airport code)', + }, + 'arrival': { + 'type': 'string', + 'description': 'The arrival city (airport code)', + }, + }, + 'required': ['departure', 'arrival'], + }, + ); + final chatModel = ChatOllama( defaultOptions: const ChatOllamaOptions( - model: 'llava', + model: 'llama3.2', temperature: 0, + tools: [getFlightTimesTool], ), ); - final prompt = ChatMessage.human( - ChatMessageContent.multiModal([ - ChatMessageContent.text('What fruit is this?'), - ChatMessageContent.image( - data: base64.encode( - await File('./bin/assets/apple.jpeg').readAsBytes(), - ), + + final messages = [ + ChatMessage.humanText( + 'What is the flight time from New York (NYC) to Los Angeles (LAX)?', + ), + ]; + + // First API call: Send the query and function description to the model + final response = await chatModel.invoke(PromptValue.chat(messages)); + + messages.add(response.output); + + // Check if the model decided to use the provided function + if (response.output.toolCalls.isEmpty) { + print("The model didn't use the function. Its response was:"); + print(response.output.content); + return; + } + + // Process function calls made by the model + for (final toolCall in response.output.toolCalls) { + final functionResponse = getFlightTimes( + toolCall.arguments['departure'], + toolCall.arguments['arrival'], + ); + // Add function response to the conversation + messages.add( + ChatMessage.tool( + toolCallId: toolCall.id, + content: functionResponse, ), - ]), - ); - final res = await chatModel.invoke(PromptValue.chat([prompt])); - print(res.output.content); - // -> 'An Apple' + ); + } + + // Second API call: Get final response from the model + final finalResponse = await chatModel.invoke(PromptValue.chat(messages)); + print(finalResponse.output.content); + // The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes. } Future _rag() async { // 1. 
Create a vector store and add documents to it final vectorStore = MemoryVectorStore( - embeddings: OllamaEmbeddings(model: 'llama3'), + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), ); await vectorStore.addDocuments( documents: [ @@ -141,7 +370,7 @@ Future _rag() async { // 3. Define the model to use and the vector store retriever final chatModel = ChatOllama( - defaultOptions: const ChatOllamaOptions(model: 'llama3'), + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), ); final retriever = vectorStore.asRetriever( defaultOptions: const VectorStoreRetrieverOptions( diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart index 439943c5..f552e60b 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart @@ -7,6 +7,7 @@ import 'package:langchain_openai/langchain_openai.dart'; void main(final List arguments) async { await _openRouter(); await _openRouterStreaming(); + await _openRouterStreamingTools(); } Future _openRouter() async { @@ -66,3 +67,56 @@ Future _openRouterStreaming() async { // 123 // 456789 } + +Future _openRouterStreamingTools() async { + final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, + ); + final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', + ); + final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), + ); + final outputParser = ToolsOutputParser(); + + final chain = promptTemplate.pipe(chat).pipe(outputParser); + + final stream = chain.stream({'foo': 'bears'}); + await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); + } + // {} + // {setup: } + // {setup: Why don't} + // {setup: Why don't bears} + // {setup: Why don't bears like fast food} + // {setup: Why don't bears like fast food?, punchline: } + // {setup: Why don't bears like fast food?, punchline: Because} + // {setup: Why don't bears like fast food?, punchline: Because they can't} + // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +} diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart index 6d302daf..2b6ea9df 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart @@ -131,9 +131,7 @@ Future _chatOpenAIJsonMode() async { defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', temperature: 0, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final chain = 
llm.pipe(JsonOutputParser()); diff --git a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart index 2095d341..aae53fa7 100644 --- a/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart +++ b/examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart @@ -13,7 +13,7 @@ Future _ollama() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3', + model: 'llama3.2', ), ); @@ -29,7 +29,7 @@ Future _ollamaStreaming() async { ); final llm = Ollama( defaultOptions: const OllamaOptions( - model: 'llama3', + model: 'llama3.2', ), ); const stringOutputParser = StringOutputParser(); diff --git a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart index 8005f8d0..b921ec7d 100644 --- a/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart +++ b/examples/docs_examples/bin/modules/model_io/output_parsers/json.dart @@ -22,9 +22,7 @@ Future _invoke() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); final parser = JsonOutputParser(); @@ -51,9 +49,7 @@ Future _streaming() async { apiKey: openAiApiKey, defaultOptions: const ChatOpenAIOptions( model: 'gpt-4-turbo', - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, - ), + responseFormat: ChatOpenAIResponseFormat.jsonObject, ), ); diff --git a/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart new file mode 100644 index 00000000..92d419c9 --- /dev/null +++ b/examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart @@ -0,0 +1,108 @@ +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + await _rag(); +} + +Future _rag() async { + // 1. Instantiate vector store + final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'), + dimensions: 512, + directory: 'bin/modules/retrieval/vector_stores/integrations', + ); + + // 2. Load documents + const loader = WebBaseLoader([ + 'https://objectbox.io/on-device-vector-databases-and-edge-ai/', + 'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/', + 'https://objectbox.io/on-device-vector-database-for-dart-flutter/', + 'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//', + ]); + final List docs = await loader.load(); + + // 3. Split docs into chunks + const splitter = RecursiveCharacterTextSplitter( + chunkSize: 500, + chunkOverlap: 0, + ); + final List chunkedDocs = await splitter.invoke(docs); + + // 4. Add documents to vector store + await vectorStore.addDocuments(documents: chunkedDocs); + + // 5. Construct a RAG prompt template + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + ''' +You are an assistant for question-answering tasks. + +Use the following pieces of retrieved context to answer the user question. 
+ +Context: +{context} + +If you don't know the answer, just say that you don't know. +Use three sentences maximum and keep the answer concise. +Cite the source you used to answer the question. + +Example: +""" +One sentence [1]. Another sentence [2]. + +Sources: +[1] https://example.com/1 +[2] https://example.com/2 +""" +''' + ), + (ChatMessageType.human, '{question}'), + ]); + + // 6. Define the model to use and the vector store retriever + final chatModel = ChatOllama( + defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), + ); + final retriever = vectorStore.asRetriever(); + + // 7. Create a Runnable that combines the retrieved documents into a single string + final docCombiner = Runnable.mapInput, String>((docs) { + return docs + .map( + (final d) => ''' +Source: ${d.metadata['source']} +Title: ${d.metadata['title']} +Content: ${d.pageContent} +--- +''', + ) + .join('\n'); + }); + + // 8. Define the RAG pipeline + final chain = Runnable.fromMap({ + 'context': retriever.pipe(docCombiner), + 'question': Runnable.passthrough(), + }).pipe(promptTemplate).pipe(chatModel).pipe(const StringOutputParser()); + + // 9. Run the pipeline + final stream = chain.stream( + 'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?', + ); + await stream.forEach(stdout.write); + // According to the sources provided, ObjectBox Vector Search uses the HNSW + // (Hierarchical Navigable Small World) algorithm [1]. + // + // And yes, you can use it in Flutter apps. The article specifically mentions + // that ObjectBox 4.0 introduces an on-device vector database for the + // Dart/Flutter platform [2]. + // + // Sources: + // [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/ + // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/ +} diff --git a/examples/docs_examples/pubspec.lock b/examples/docs_examples/pubspec.lock index d3e56a8f..1a928bf7 100644 --- a/examples/docs_examples/pubspec.lock +++ b/examples/docs_examples/pubspec.lock @@ -5,18 +5,25 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" + anthropic_sdk_dart: + dependency: "direct overridden" + description: + path: "../../packages/anthropic_sdk_dart" + relative: true + source: path + version: "0.1.0" args: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -47,23 +54,23 @@ packages: path: "../../packages/chromadb" relative: true source: path - version: "0.2.0" + version: "0.2.0+1" collection: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" 
crypto: dependency: transitive description: @@ -92,18 +99,26 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + url: "https://pub.dev" + source: hosted + version: "1.1.2" + ffi: + dependency: transitive + description: + name: ffi + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "2.1.3" fixnum: dependency: transitive description: @@ -112,54 +127,62 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" freezed_annotation: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" gcloud: dependency: transitive description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: "https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_generative_ai: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.4" google_identity_services_web: dependency: transitive description: name: google_identity_services_web - sha256: "9482364c9f8b7bd36902572ebc3a7c2b5c8ee57a9c93e6eb5099c1a9ec5265d8" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.1+1" + version: "0.3.1+4" googleapis: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: transitive description: name: googleapis_auth - sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 url: "https://pub.dev" source: hosted - version: "1.5.1" + version: "1.6.0" html: dependency: transitive description: @@ -172,18 +195,18 @@ packages: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: 
"40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" iregexp: dependency: transitive description: @@ -192,86 +215,85 @@ packages: url: "https://pub.dev" source: hosted version: "0.1.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" json_path: dependency: transitive description: name: json_path - sha256: "149d32ceb7dc22422ea6d09e401fd688f54e1343bc9ff8c3cb1900ca3b1ad8b1" + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" url: "https://pub.dev" source: hosted - version: "0.7.1" + version: "0.7.4" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.6" + langchain_anthropic: + dependency: "direct main" + description: + path: "../../packages/langchain_anthropic" + relative: true + source: path + version: "0.1.1+2" langchain_chroma: dependency: "direct main" description: path: "../../packages/langchain_chroma" relative: true source: path - version: "0.2.0+4" + version: "0.2.1+3" langchain_community: dependency: "direct main" description: path: "../../packages/langchain_community" relative: true source: path - version: "0.2.0+1" + version: "0.3.2" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.6" langchain_google: dependency: "direct main" description: path: "../../packages/langchain_google" relative: true source: path - version: "0.5.0" + version: "0.6.3+1" langchain_mistralai: dependency: "direct main" description: path: "../../packages/langchain_mistralai" relative: true source: path - version: "0.2.0+1" + version: "0.2.3+1" langchain_ollama: dependency: "direct main" description: path: "../../packages/langchain_ollama" relative: true source: path - version: "0.2.1" + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -284,10 +306,10 @@ packages: dependency: transitive description: name: math_expressions - sha256: "3576593617c3870d75728a751f6ec6e606706d44e363f088ac394b5a28a98064" + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 url: "https://pub.dev" source: hosted - version: "2.4.0" + version: "2.6.0" maybe_just_nothing: dependency: transitive description: @@ -300,31 +322,39 @@ packages: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.15.0" mistralai_dart: dependency: "direct overridden" description: path: "../../packages/mistralai_dart" relative: true source: path - version: "0.0.3+1" + version: "0.0.3+3" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: 
"70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" ollama_dart: dependency: "direct overridden" description: path: "../../packages/ollama_dart" relative: true source: path - version: "0.1.0" + version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.4.2" path: dependency: transitive description: @@ -361,10 +391,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -385,10 +415,17 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.1.0" term_glyph: dependency: transitive description: @@ -409,10 +446,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -427,14 +464,14 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0" + version: "0.1.0+2" web: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/docs_examples/pubspec.yaml b/examples/docs_examples/pubspec.yaml index 0b57edbc..985f1d64 100644 --- a/examples/docs_examples/pubspec.yaml +++ b/examples/docs_examples/pubspec.yaml @@ -1,16 +1,17 @@ name: docs_examples -description: Examples used in langchaindart.com documentation. +description: Examples used in langchaindart.dev documentation. 
version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_chroma: ^0.2.0+4 - langchain_community: 0.2.0+1 - langchain_google: ^0.5.0 - langchain_mistralai: ^0.2.0+1 - langchain_ollama: ^0.2.1 - langchain_openai: ^0.6.1 + langchain: ^0.7.6 + langchain_anthropic: ^0.1.1+2 + langchain_chroma: ^0.2.1+3 + langchain_community: 0.3.2 + langchain_google: ^0.6.3+1 + langchain_mistralai: ^0.2.3+1 + langchain_ollama: ^0.3.2 + langchain_openai: ^0.7.2 diff --git a/examples/docs_examples/pubspec_overrides.yaml b/examples/docs_examples/pubspec_overrides.yaml index e02da308..4060d466 100644 --- a/examples/docs_examples/pubspec_overrides.yaml +++ b/examples/docs_examples/pubspec_overrides.yaml @@ -1,9 +1,13 @@ -# melos_managed_dependency_overrides: chromadb,langchain,langchain_chroma,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai,langchain_core,langchain_community +# melos_managed_dependency_overrides: anthropic_sdk_dart,chromadb,langchain,langchain_anthropic,langchain_chroma,langchain_community,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,tavily_dart,vertex_ai dependency_overrides: + anthropic_sdk_dart: + path: ../../packages/anthropic_sdk_dart chromadb: path: ../../packages/chromadb langchain: path: ../../packages/langchain + langchain_anthropic: + path: ../../packages/langchain_anthropic langchain_chroma: path: ../../packages/langchain_chroma langchain_community: @@ -24,5 +28,7 @@ dependency_overrides: path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart + tavily_dart: + path: ../../packages/tavily_dart vertex_ai: path: ../../packages/vertex_ai diff --git a/examples/hello_world_backend/README.md b/examples/hello_world_backend/README.md index 4f00582c..70208b7a 100644 --- a/examples/hello_world_backend/README.md +++ b/examples/hello_world_backend/README.md @@ -7,7 +7,7 @@ It exposes a REST API that given a list of topics, generates a sonnet about them The HTTP server is implemented using [package:shelf](https://pub.dev/packages/shelf). -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. 
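As a rough illustration of the pattern this example follows (not the example's actual source; the route path, prompt wording, and `OPENAI_API_KEY` environment variable are assumptions), a shelf endpoint that pipes a list of topics through a LangChain.dart chain might look like this:

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';
import 'package:shelf/shelf.dart';
import 'package:shelf/shelf_io.dart' as io;
import 'package:shelf_router/shelf_router.dart';

void main() async {
  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];

  // Prompt template -> chat model -> plain string output.
  final chain = ChatPromptTemplate.fromTemplate(
    'Write a sonnet about the following topics: {topics}',
  ).pipe(ChatOpenAI(apiKey: openAiApiKey)).pipe(const StringOutputParser());

  // POST a comma-separated list of topics, get a sonnet back.
  final router = Router()
    ..post('/v1/sonnets', (final Request request) async {
      final topics = await request.readAsString();
      final sonnet = await chain.invoke({'topics': topics});
      return Response.ok(sonnet);
    });

  final server = await io.serve(router.call, InternetAddress.anyIPv4, 8080);
  print('Serving at http://${server.address.host}:${server.port}');
}
```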
![Hello world backend](hello_world_backend.gif) diff --git a/examples/hello_world_backend/pubspec.lock b/examples/hello_world_backend/pubspec.lock index 3ef992b7..cbfd6954 100644 --- a/examples/hello_world_backend/pubspec.lock +++ b/examples/hello_world_backend/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -69,18 +69,18 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_methods: dependency: transitive description: @@ -93,47 +93,39 @@ packages: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" - url: "https://pub.dev" - source: hosted - version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "0.6.7" + version: "4.1.0" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.7.2" langchain_tiktoken: dependency: 
transitive description: @@ -146,17 +138,17 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.4.2" path: dependency: transitive description: @@ -169,18 +161,18 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" shelf: dependency: "direct main" description: name: shelf - sha256: ad29c505aee705f41a4d8963641f91ac4cee3c8fad5947e033390a7bd8180fa4 + sha256: e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 url: "https://pub.dev" source: hosted - version: "1.4.1" + version: "1.4.2" shelf_router: dependency: "direct main" description: @@ -225,10 +217,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -249,17 +241,17 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" web: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_backend/pubspec.yaml b/examples/hello_world_backend/pubspec.yaml index fa43a6d8..883ecefc 100644 --- a/examples/hello_world_backend/pubspec.yaml +++ b/examples/hello_world_backend/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_openai: ^0.6.1 - shelf: ^1.4.1 + langchain: ^0.7.6 + langchain_openai: ^0.7.2 + shelf: ^1.4.2 shelf_router: ^1.1.4 diff --git a/examples/hello_world_backend/pubspec_overrides.yaml b/examples/hello_world_backend/pubspec_overrides.yaml index 93b5421a..a52f79af 100644 --- a/examples/hello_world_backend/pubspec_overrides.yaml +++ b/examples/hello_world_backend/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_cli/README.md b/examples/hello_world_cli/README.md index 608daeb6..3ab0ed81 100644 --- a/examples/hello_world_cli/README.md +++ b/examples/hello_world_cli/README.md @@ -2,7 +2,7 @@ This sample app demonstrates how to call an LLM from a CLI application using LangChain.dart. 
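As a minimal sketch of that pattern (the prompt, model, and `OPENAI_API_KEY` environment variable are assumptions, not necessarily what this example uses), a CLI entry point that streams a model response to stdout could look like:

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

void main(final List<String> arguments) async {
  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];

  // Take the question from the command line, with a fallback for quick tests.
  final question =
      arguments.isNotEmpty ? arguments.join(' ') : 'Why is the sky blue?';

  final chain = ChatPromptTemplate.fromTemplate('{question}')
      .pipe(ChatOpenAI(apiKey: openAiApiKey))
      .pipe(const StringOutputParser());

  // Stream the answer token by token as it is generated.
  final stream = chain.stream({'question': question});
  await stream.forEach(stdout.write);
  stdout.writeln();
}
```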
-You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) +You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) blog post. ## Usage diff --git a/examples/hello_world_cli/pubspec.lock b/examples/hello_world_cli/pubspec.lock index 42f90c1a..52a95a74 100644 --- a/examples/hello_world_cli/pubspec.lock +++ b/examples/hello_world_cli/pubspec.lock @@ -21,18 +21,18 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" cross_file: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -45,18 +45,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -69,63 +69,55 @@ packages: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" - url: "https://pub.dev" - source: hosted - version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "0.6.7" + version: "4.1.0" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" 
relative: true source: path - version: "0.3.1" + version: "0.3.6" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -138,17 +130,17 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.4.2" path: dependency: transitive description: @@ -161,10 +153,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" source_span: dependency: transitive description: @@ -185,10 +177,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -209,17 +201,17 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" web: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: - dart: ">=3.3.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/hello_world_cli/pubspec.yaml b/examples/hello_world_cli/pubspec.yaml index 581a3927..55291147 100644 --- a/examples/hello_world_cli/pubspec.yaml +++ b/examples/hello_world_cli/pubspec.yaml @@ -4,8 +4,8 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - langchain: ^0.7.1 - langchain_openai: ^0.6.1 + langchain: ^0.7.6 + langchain_openai: ^0.7.2 diff --git a/examples/hello_world_cli/pubspec_overrides.yaml b/examples/hello_world_cli/pubspec_overrides.yaml index 93b5421a..a52f79af 100644 --- a/examples/hello_world_cli/pubspec_overrides.yaml +++ b/examples/hello_world_cli/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_openai,openai_dart dependency_overrides: langchain: path: ../../packages/langchain diff --git a/examples/hello_world_flutter/README.md b/examples/hello_world_flutter/README.md index 80a111af..eb983d97 100644 --- a/examples/hello_world_flutter/README.md +++ b/examples/hello_world_flutter/README.md @@ -1,9 +1,8 @@ -# Hello world Flutter +# Hello World Flutter -This sample app demonstrates how to call an LLM from a Flutter application using LangChain.dart. 
+This sample application demonstrates how to call various remote and local LLMs from a Flutter application using LangChain.dart. -You can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.com/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) -blog post. +![Hello World Flutter](hello_world_flutter.gif) ## Usage @@ -11,15 +10,5 @@ blog post. flutter run ``` -### Using OpenAI API - -You can get your OpenAI API key [here](https://platform.openai.com/account/api-keys). - -![OpenAI](hello_world_flutter_openai.gif) - -### Local model - -You can easily run local models using [Prem app](https://www.premai.io/#PremApp). It creates a local -server that exposes a REST API with the same interface as the OpenAI API. - -![Local](hello_world_flutter_local.gif) +- To use the remote providers you need to provide your API key. +- To use local models you need to have the [Ollama](https://ollama.ai/) app running and the model downloaded. diff --git a/examples/hello_world_flutter/android/app/build.gradle b/examples/hello_world_flutter/android/app/build.gradle index 48e93274..2c711c95 100644 --- a/examples/hello_world_flutter/android/app/build.gradle +++ b/examples/hello_world_flutter/android/app/build.gradle @@ -1,3 +1,9 @@ +plugins { + id 'com.android.application' + id 'kotlin-android' + id 'dev.flutter.flutter-gradle-plugin' +} + def localProperties = new Properties() def localPropertiesFile = rootProject.file('local.properties') if (localPropertiesFile.exists()) { @@ -6,11 +12,6 @@ if (localPropertiesFile.exists()) { } } -def flutterRoot = localProperties.getProperty('flutter.sdk') -if (flutterRoot == null) { - throw new GradleException("Flutter SDK not found. Define location with flutter.sdk in the local.properties file.") -} - def flutterVersionCode = localProperties.getProperty('flutter.versionCode') if (flutterVersionCode == null) { flutterVersionCode = '1' @@ -21,22 +22,18 @@ if (flutterVersionName == null) { flutterVersionName = '1.0' } -apply plugin: 'com.android.application' -apply plugin: 'kotlin-android' -apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle" - android { namespace "com.example.hello_world_flutter" compileSdkVersion flutter.compileSdkVersion ndkVersion flutter.ndkVersion compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } kotlinOptions { - jvmTarget = '1.8' + jvmTarget = '17' } sourceSets { @@ -44,10 +41,7 @@ android { } defaultConfig { - // TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html). applicationId "com.example.hello_world_flutter" - // You can update the following values to match your application needs. - // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. minSdkVersion flutter.minSdkVersion targetSdkVersion flutter.targetSdkVersion versionCode flutterVersionCode.toInteger() @@ -56,8 +50,6 @@ android { buildTypes { release { - // TODO: Add your own signing config for the release build. - // Signing with the debug keys for now, so `flutter run --release` works. signingConfig signingConfigs.debug } } @@ -66,7 +58,3 @@ android { flutter { source '../..' 
} - -dependencies { - implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" -} diff --git a/examples/hello_world_flutter/android/build.gradle b/examples/hello_world_flutter/android/build.gradle index f7eb7f63..bc157bd1 100644 --- a/examples/hello_world_flutter/android/build.gradle +++ b/examples/hello_world_flutter/android/build.gradle @@ -1,16 +1,3 @@ -buildscript { - ext.kotlin_version = '1.7.10' - repositories { - google() - mavenCentral() - } - - dependencies { - classpath 'com.android.tools.build:gradle:7.3.0' - classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" - } -} - allprojects { repositories { google() diff --git a/examples/hello_world_flutter/android/gradle.properties b/examples/hello_world_flutter/android/gradle.properties index 94adc3a3..a199917a 100644 --- a/examples/hello_world_flutter/android/gradle.properties +++ b/examples/hello_world_flutter/android/gradle.properties @@ -1,3 +1,3 @@ -org.gradle.jvmargs=-Xmx1536M +org.gradle.jvmargs=-Xmx8g -XX:+HeapDumpOnOutOfMemoryError -XX:+UseParallelGC -Dfile.encoding=UTF-8 android.useAndroidX=true android.enableJetifier=true diff --git a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties index 3c472b99..11fce01a 100644 --- a/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties +++ b/examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties @@ -2,4 +2,4 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip diff --git a/examples/hello_world_flutter/android/settings.gradle b/examples/hello_world_flutter/android/settings.gradle index 44e62bcf..fd7c1580 100644 --- a/examples/hello_world_flutter/android/settings.gradle +++ b/examples/hello_world_flutter/android/settings.gradle @@ -1,11 +1,25 @@ -include ':app' +pluginManagement { + def flutterSdkPath = { + def properties = new Properties() + file("local.properties").withInputStream { properties.load(it) } + def flutterSdkPath = properties.getProperty("flutter.sdk") + assert flutterSdkPath != null, "flutter.sdk not set in local.properties" + return flutterSdkPath + }() -def localPropertiesFile = new File(rootProject.projectDir, "local.properties") -def properties = new Properties() + includeBuild("$flutterSdkPath/packages/flutter_tools/gradle") -assert localPropertiesFile.exists() -localPropertiesFile.withReader("UTF-8") { reader -> properties.load(reader) } + repositories { + google() + mavenCentral() + gradlePluginPortal() + } +} -def flutterSdkPath = properties.getProperty("flutter.sdk") -assert flutterSdkPath != null, "flutter.sdk not set in local.properties" -apply from: "$flutterSdkPath/packages/flutter_tools/gradle/app_plugin_loader.gradle" +plugins { + id "dev.flutter.flutter-plugin-loader" version "1.0.0" + id "com.android.application" version "8.2.2" apply false + id "org.jetbrains.kotlin.android" version "1.9.23" apply false +} + +include ":app" diff --git a/examples/hello_world_flutter/devtools_options.yaml b/examples/hello_world_flutter/devtools_options.yaml new file mode 100644 index 00000000..fa0b357c --- /dev/null +++ b/examples/hello_world_flutter/devtools_options.yaml @@ -0,0 +1,3 @@ +description: This file stores settings for Dart & Flutter DevTools. 
+documentation: https://docs.flutter.dev/tools/devtools/extensions#configure-extension-enablement-states +extensions: diff --git a/examples/hello_world_flutter/hello_world_flutter.gif b/examples/hello_world_flutter/hello_world_flutter.gif new file mode 100644 index 00000000..25058c38 Binary files /dev/null and b/examples/hello_world_flutter/hello_world_flutter.gif differ diff --git a/examples/hello_world_flutter/hello_world_flutter_local.gif b/examples/hello_world_flutter/hello_world_flutter_local.gif deleted file mode 100644 index d9359f00..00000000 Binary files a/examples/hello_world_flutter/hello_world_flutter_local.gif and /dev/null differ diff --git a/examples/hello_world_flutter/hello_world_flutter_openai.gif b/examples/hello_world_flutter/hello_world_flutter_openai.gif deleted file mode 100644 index d0547b08..00000000 Binary files a/examples/hello_world_flutter/hello_world_flutter_openai.gif and /dev/null differ diff --git a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist index 9625e105..7c569640 100644 --- a/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist +++ b/examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist @@ -21,6 +21,6 @@ CFBundleVersion 1.0 MinimumOSVersion - 11.0 + 12.0 diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj index d030af56..a50a737e 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj @@ -168,7 +168,7 @@ 97C146E61CF9000F007C117D /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 1300; + LastUpgradeCheck = 1510; ORGANIZATIONNAME = ""; TargetAttributes = { 331C8080294A63A400263BE5 = { @@ -344,7 +344,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; @@ -472,7 +472,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = iphoneos; @@ -521,7 +521,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; diff --git a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme index e42adcb3..8e3ca5df 100644 --- a/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme +++ b/examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -1,6 +1,6 @@ { - HomeScreenCubit() : super(const HomeScreenState()); + HomeScreenCubit() : super(const HomeScreenState()) { + _updateChain(); + } + + RunnableSequence? 
chain; - void onClientTypeChanged(final ClientType clientType) { - emit(state.copyWith(clientType: clientType, response: '')); + void onProviderChanged(final Provider provider) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + provider: provider, + response: '', + ), + ); + _updateChain(); } - void onOpenAIKeyChanged(final String openAIKey) { - emit(state.copyWith(openAIKey: openAIKey)); + void onModelChanged(final String model) { + final newModel = { + ...state.model, + state.provider: model, + }; + emit(state.copyWith(model: newModel)); + _updateChain(); } - void onLocalUrlChanged(final String localUrl) { - emit(state.copyWith(localUrl: localUrl)); + void onApiKeyChanged(final String apiKey) { + final newApiKey = { + ...state.apiKey, + state.provider: apiKey, + }; + emit(state.copyWith(apiKey: newApiKey)); + _updateChain(); + } + + void onBaseUrlChanged(final String baseUrl) { + final newBaseUrl = { + ...state.baseUrl, + state.provider: baseUrl, + }; + emit(state.copyWith(baseUrl: newBaseUrl)); + _updateChain(); } void onQueryChanged(final String query) { @@ -27,68 +62,106 @@ class HomeScreenCubit extends Cubit { } Future onSubmitPressed() async { - final config = _getClientConfig(); - if (config == null) { - return; - } - final (apiKey, baseUrl) = config; + if (!_validateInput()) return; + emit(state.copyWith(status: HomeScreenStatus.generating, response: '')); - final query = state.query; - if (query == null || query.isEmpty) { + assert(chain != null); + final stream = chain!.stream(state.query).handleError(_onErrorGenerating); + await for (final result in stream) { emit( state.copyWith( status: HomeScreenStatus.idle, - error: HomeScreenError.queryEmpty, + response: (state.response) + result, ), ); - return; } + } - emit(state.copyWith(status: HomeScreenStatus.generating, response: '')); + bool _validateInput() { + final provider = state.provider; + if (provider.isRemote && (state.apiKey[provider] ?? '').isEmpty) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.apiKeyEmpty, + ), + ); + return false; + } - final llm = ChatOpenAI( - apiKey: apiKey, - baseUrl: baseUrl ?? '', - ); + if (state.query.isEmpty) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.queryEmpty, + ), + ); + return false; + } - final result = await llm([ChatMessage.humanText(query)]); - emit( - state.copyWith( - status: HomeScreenStatus.idle, - response: result.content.trim(), - ), - ); + return true; } - (String? apiKey, String? baseUrl)? _getClientConfig() { - final clientType = state.clientType; + void _updateChain() { + try { + final provider = state.provider; + final model = state.model; + final apiKey = state.apiKey; - if (clientType == ClientType.openAI) { - final openAIKey = state.openAIKey; - if (openAIKey == null || openAIKey.isEmpty) { - emit( - state.copyWith( - status: HomeScreenStatus.idle, - error: HomeScreenError.openAIKeyEmpty, + final chatModel = switch (provider) { + Provider.googleAI => ChatGoogleGenerativeAI( + apiKey: apiKey[provider] ?? '', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatGoogleGenerativeAIOptions( + model: model[provider] ?? provider.defaultModel, + ), + ), + Provider.mistral => ChatMistralAI( + apiKey: apiKey[provider] ?? '', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatMistralAIOptions( + model: model[provider] ?? provider.defaultModel, + ), + ), + Provider.openAI => ChatOpenAI( + apiKey: apiKey[provider] ?? 
'', + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatOpenAIOptions( + model: model[provider] ?? provider.defaultModel, + ), ), - ); - return null; - } - - return (openAIKey, null); - } else { - final localUrl = state.localUrl; - if (localUrl == null || localUrl.isEmpty) { - emit( - state.copyWith( - status: HomeScreenStatus.idle, - error: HomeScreenError.localUrlEmpty, + Provider.ollama => ChatOllama( + baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl, + defaultOptions: ChatOllamaOptions( + model: model[provider] ?? provider.defaultModel, + ), ), - ); - return null; - } + } as BaseChatModel; - return (null, localUrl); + chain?.close(); + chain = Runnable.getMapFromInput('query') + .pipe( + ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + 'You are a helpful assistant. Reply to the user using Markdown.', + ), + (ChatMessageType.human, '{query}'), + ]), + ) + .pipe(chatModel) + .pipe(const StringOutputParser()); + } catch (_) { + // Ignore invalid base URL exceptions } } + + void _onErrorGenerating(final Object error) { + emit( + state.copyWith( + status: HomeScreenStatus.idle, + error: HomeScreenError.generationError, + ), + ); + } } diff --git a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart index d76e34dd..c5a95466 100644 --- a/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart +++ b/examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart @@ -6,36 +6,40 @@ class HomeScreenState extends Equatable { const HomeScreenState({ this.status = HomeScreenStatus.idle, this.error, - this.clientType = ClientType.openAI, - this.openAIKey, - this.localUrl, - this.query, - this.response, + this.provider = Provider.ollama, + this.model = const {}, + this.apiKey = const {}, + this.baseUrl = const {}, + this.query = '', + this.response = '', }); final HomeScreenStatus status; final HomeScreenError? error; - final ClientType clientType; - final String? openAIKey; - final String? localUrl; - final String? query; - final String? response; + final Provider provider; + final Map<Provider, String> model; + final Map<Provider, String> apiKey; + final Map<Provider, String> baseUrl; + final String query; + final String response; HomeScreenState copyWith({ final HomeScreenStatus? status, final HomeScreenError? error, - final ClientType? clientType, - final String? openAIKey, - final String? localUrl, + final Provider? provider, + final Map<Provider, String>? model, + final Map<Provider, String>? apiKey, + final Map<Provider, String>? baseUrl, final String? query, final String? response, }) { return HomeScreenState( status: status ?? this.status, error: error, - clientType: clientType ?? this.clientType, - openAIKey: openAIKey ?? this.openAIKey, - localUrl: localUrl ?? this.localUrl, + provider: provider ?? this.provider, + model: model ?? this.model, + apiKey: apiKey ?? this.apiKey, + baseUrl: baseUrl ?? this.baseUrl, query: query ?? this.query, response: response ??
this.response, ); @@ -45,9 +49,10 @@ class HomeScreenState extends Equatable { List get props => [ status, error, - clientType, - openAIKey, - localUrl, + provider, + model, + apiKey, + baseUrl, query, response, ]; @@ -59,12 +64,9 @@ enum HomeScreenStatus { } enum HomeScreenError { - openAIKeyEmpty, - localUrlEmpty, + modelEmpty, + apiKeyEmpty, + baseUrlEmpty, queryEmpty, -} - -enum ClientType { - openAI, - local, + generationError, } diff --git a/examples/hello_world_flutter/lib/home/bloc/providers.dart b/examples/hello_world_flutter/lib/home/bloc/providers.dart new file mode 100644 index 00000000..4d9c364b --- /dev/null +++ b/examples/hello_world_flutter/lib/home/bloc/providers.dart @@ -0,0 +1,40 @@ +// ignore_for_file: public_member_api_docs + +enum Provider { + googleAI( + name: 'GoogleAI', + defaultModel: 'gemini-1.5-pro', + defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta', + isRemote: true, + ), + mistral( + name: 'Mistral', + defaultModel: 'mistral-small', + defaultBaseUrl: 'https://api.mistral.ai/v1', + isRemote: true, + ), + openAI( + name: 'OpenAI', + defaultModel: 'gpt-4o', + defaultBaseUrl: 'https://api.openai.com/v1', + isRemote: true, + ), + ollama( + name: 'Ollama', + defaultModel: 'llama3.2', + defaultBaseUrl: 'http://localhost:11434/api', + isRemote: false, + ); + + const Provider({ + required this.name, + required this.defaultModel, + required this.defaultBaseUrl, + required this.isRemote, + }); + + final String name; + final String defaultModel; + final String defaultBaseUrl; + final bool isRemote; +} diff --git a/examples/hello_world_flutter/lib/home/home_screen.dart b/examples/hello_world_flutter/lib/home/home_screen.dart index 2b46a017..5b117845 100644 --- a/examples/hello_world_flutter/lib/home/home_screen.dart +++ b/examples/hello_world_flutter/lib/home/home_screen.dart @@ -1,8 +1,10 @@ // ignore_for_file: public_member_api_docs import 'package:flutter/material.dart'; import 'package:flutter_bloc/flutter_bloc.dart'; +import 'package:flutter_markdown/flutter_markdown.dart'; import 'bloc/home_screen_cubit.dart'; +import 'bloc/providers.dart'; class HomeScreen extends StatelessWidget { const HomeScreen({super.key}); @@ -27,10 +29,7 @@ class _Scaffold extends StatelessWidget { backgroundColor: theme.colorScheme.inversePrimary, title: const Text('🦜️🔗 LangChain.dart'), ), - body: const Padding( - padding: EdgeInsets.all(16), - child: _Body(), - ), + body: const _Body(), ); } } @@ -38,146 +37,203 @@ class _Scaffold extends StatelessWidget { class _Body extends StatelessWidget { const _Body(); - @override - Widget build(final BuildContext context) { - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.clientType != current.clientType, - builder: (final context, final state) { - return Column( - mainAxisSize: MainAxisSize.min, - crossAxisAlignment: CrossAxisAlignment.start, - children: [ - _ClientTypeSelector(state.clientType), - const SizedBox(height: 16), - if (state.clientType == ClientType.openAI) - const _OpenAIKeyTextField() - else - const _LocalUrlTextField(), - const SizedBox(height: 16), - const _QueryTextField(), - const SizedBox(height: 16), - const _SubmitButton(), - const SizedBox(height: 12), - const Divider(), - const SizedBox(height: 16), - const _Response(), - ], - ); - }, - ); - } -} - -class _ClientTypeSelector extends StatelessWidget { - const _ClientTypeSelector(this.selected); - - final ClientType selected; - @override Widget build(final BuildContext context) { final cubit = context.read(); - return Center( 
- child: SegmentedButton( - segments: const >[ - ButtonSegment( - value: ClientType.openAI, - label: Text('OpenAI'), - icon: Icon(Icons.cloud_outlined), - ), - ButtonSegment( - value: ClientType.local, - label: Text('Local'), - icon: Icon(Icons.install_desktop_outlined), + return BlocListener( + listenWhen: (final previous, final current) => + previous.error != current.error, + listener: (final context, final state) { + if (state.error == HomeScreenError.generationError) { + ScaffoldMessenger.of(context).showSnackBar( + const SnackBar( + content: Text( + 'An error occurred while generating the response', + ), + ), + ); + } + }, + child: SingleChildScrollView( + child: Padding( + padding: const EdgeInsets.all(16), + child: Column( + mainAxisSize: MainAxisSize.min, + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + const _ProviderSelector(), + const SizedBox(height: 16), + Row( + crossAxisAlignment: CrossAxisAlignment.start, + children: [ + Expanded(child: _ApiKeyTextField(cubit)), + const SizedBox(width: 16), + Expanded(child: _BaseUrlTextField(cubit)), + ], + ), + const SizedBox(height: 16), + _ModelTextField(cubit), + const SizedBox(height: 16), + _QueryTextField(cubit), + const SizedBox(height: 16), + const _SubmitButton(), + const SizedBox(height: 12), + const Divider(), + const SizedBox(height: 16), + const _Response(), + ], ), - ], - selected: {selected}, - onSelectionChanged: (final Set newSelection) { - cubit.onClientTypeChanged(newSelection.first); - }, + ), ), ); } } -class _OpenAIKeyTextField extends StatelessWidget { - const _OpenAIKeyTextField(); +class _ProviderSelector extends StatelessWidget { + const _ProviderSelector(); @override Widget build(final BuildContext context) { final cubit = context.read(); return BlocBuilder( buildWhen: (final previous, final current) => - previous.error != current.error, + previous.provider != current.provider, builder: (final context, final state) { - return TextField( - controller: TextEditingController(text: state.openAIKey), - decoration: InputDecoration( - prefixIcon: const Icon(Icons.password), - labelText: 'OpenAI API key', - filled: true, - errorText: state.error == HomeScreenError.openAIKeyEmpty - ? 'OpenAI API key cannot be empty' - : null, + return Center( + child: SegmentedButton( + segments: Provider.values + .map( + (final provider) => ButtonSegment( + value: provider, + label: Text(provider.name), + icon: Icon( + provider.isRemote + ? Icons.cloud_outlined + : Icons.install_desktop_outlined, + ), + ), + ) + .toList(), + selected: {state.provider}, + onSelectionChanged: (final Set newSelection) { + cubit.onProviderChanged(newSelection.first); + }, ), - obscureText: true, - onChanged: cubit.onOpenAIKeyChanged, ); }, ); } } -class _LocalUrlTextField extends StatelessWidget { - const _LocalUrlTextField(); +class _ModelTextField extends _BaseTextField { + const _ModelTextField(this.cubit); + + final HomeScreenCubit cubit; @override - Widget build(final BuildContext context) { - final cubit = context.read(); - return BlocBuilder( - buildWhen: (final previous, final current) => - previous.error != current.error, - builder: (final context, final state) { - return TextField( - controller: TextEditingController(text: state.localUrl), - decoration: InputDecoration( - prefixIcon: const Icon(Icons.link), - labelText: 'Local URL', - filled: true, - errorText: state.error == HomeScreenError.localUrlEmpty - ? 
'Local URL cannot be empty'
- : null,
- ),
- onChanged: cubit.onLocalUrlChanged,
- );
- },
- );
- }
+ String get labelText => 'Model name';
+
+ @override
+ bool get obscureText => false;
+
+ @override
+ IconData get prefixIcon => Icons.link;
+
+ @override
+ HomeScreenError get errorType => HomeScreenError.modelEmpty;
+
+ @override
+ String get errorText => 'Model name cannot be empty';
+
+ @override
+ String onProviderChanged(final HomeScreenState state) =>
+ state.model[state.provider] ?? state.provider.defaultModel;
+
+ @override
+ void onTextChanged(final String value) => cubit.onModelChanged(value);
}
-class _QueryTextField extends StatelessWidget {
- const _QueryTextField();
+class _ApiKeyTextField extends _BaseTextField {
+ const _ApiKeyTextField(this.cubit);
+
+ final HomeScreenCubit cubit;
@override
- Widget build(final BuildContext context) {
- final cubit = context.read();
- return BlocBuilder(
- buildWhen: (final previous, final current) =>
- previous.error != current.error,
- builder: (final context, final state) {
- return TextField(
- decoration: InputDecoration(
- labelText: 'Enter question',
- filled: true,
- errorText: state.error == HomeScreenError.queryEmpty
- ? 'Question cannot be empty'
- : null,
- ),
- onChanged: cubit.onQueryChanged,
- );
- },
- );
- }
+ String get labelText => 'API key';
+
+ @override
+ bool get obscureText => true;
+
+ @override
+ IconData get prefixIcon => Icons.password;
+
+ @override
+ HomeScreenError get errorType => HomeScreenError.apiKeyEmpty;
+
+ @override
+ String get errorText => 'API key cannot be empty';
+
+ @override
+ String onProviderChanged(final HomeScreenState state) =>
+ state.apiKey[state.provider] ?? '';
+
+ @override
+ void onTextChanged(final String value) => cubit.onApiKeyChanged(value);
+}
+
+class _BaseUrlTextField extends _BaseTextField {
+ const _BaseUrlTextField(this.cubit);
+
+ final HomeScreenCubit cubit;
+
+ @override
+ String get labelText => 'Base URL';
+
+ @override
+ bool get obscureText => false;
+
+ @override
+ IconData get prefixIcon => Icons.language;
+
+ @override
+ HomeScreenError get errorType => HomeScreenError.baseUrlEmpty;
+
+ @override
+ String get errorText => 'Base URL cannot be empty';
+
+ @override
+ String onProviderChanged(final HomeScreenState state) =>
+ state.baseUrl[state.provider] ?? state.provider.defaultBaseUrl;
+
+ @override
+ void onTextChanged(final String value) => cubit.onBaseUrlChanged(value);
+}
+
+class _QueryTextField extends _BaseTextField {
+ const _QueryTextField(this.cubit);
+
+ final HomeScreenCubit cubit;
+
+ @override
+ String get labelText => 'Enter question';
+
+ @override
+ bool get obscureText => false;
+
+ @override
+ IconData get prefixIcon => Icons.question_answer;
+
+ @override
+ HomeScreenError get errorType => HomeScreenError.queryEmpty;
+
+ @override
+ String get errorText => 'Question cannot be empty';
+
+ @override
+ String onProviderChanged(final HomeScreenState state) => '';
+
+ @override
+ void onTextChanged(final String value) => cubit.onQueryChanged(value);
}
class _SubmitButton extends StatelessWidget {
@@ -211,7 +267,7 @@ class _Response extends StatelessWidget {
return BlocBuilder(
builder: (final context, final state) {
final response = state.response;
- if (response == null || response.isEmpty) {
+ if (response.isEmpty) {
return const SizedBox.shrink();
}
@@ -224,8 +280,10 @@ class _Response extends StatelessWidget {
'Response',
style: theme.textTheme.headlineSmall,
),
- SelectableText(
- state.response ??
'', + Markdown( + data: state.response, + shrinkWrap: true, + padding: EdgeInsets.zero, ), ], ); @@ -233,3 +291,64 @@ class _Response extends StatelessWidget { ); } } + +abstract class _BaseTextField extends StatefulWidget { + const _BaseTextField(); + + String get labelText; + + bool get obscureText; + + IconData get prefixIcon; + + HomeScreenError get errorType; + + String get errorText; + + String onProviderChanged(final HomeScreenState state); + + void onTextChanged(final String value); + + @override + _BaseTextFieldState createState() => _BaseTextFieldState(); +} + +class _BaseTextFieldState extends State<_BaseTextField> { + late TextEditingController _controller; + + @override + void initState() { + super.initState(); + _controller = TextEditingController(); + } + + @override + Widget build(BuildContext context) { + return BlocBuilder( + buildWhen: (previous, current) => + previous.provider != current.provider || + previous.error != current.error, + builder: (context, state) { + _controller.text = widget.onProviderChanged(state); + return TextField( + controller: _controller, + obscureText: widget.obscureText, + decoration: InputDecoration( + prefixIcon: Icon(widget.prefixIcon), + labelText: widget.labelText, + filled: true, + errorText: + state.error == widget.errorType ? widget.errorText : null, + ), + onChanged: widget.onTextChanged, + ); + }, + ); + } + + @override + void dispose() { + _controller.dispose(); + super.dispose(); + } +} diff --git a/examples/hello_world_flutter/pubspec.lock b/examples/hello_world_flutter/pubspec.lock index 05dca7e4..4c5f7717 100644 --- a/examples/hello_world_flutter/pubspec.lock +++ b/examples/hello_world_flutter/pubspec.lock @@ -1,6 +1,22 @@ # Generated by pub # See https://dart.dev/tools/pub/glossary#lockfile packages: + _discoveryapis_commons: + dependency: transitive + description: + name: _discoveryapis_commons + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" + url: "https://pub.dev" + source: hosted + version: "1.0.7" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" async: dependency: transitive description: @@ -13,10 +29,10 @@ packages: dependency: transitive description: name: bloc - sha256: "3820f15f502372d979121de1f6b97bfcf1630ebff8fe1d52fb2b0bfa49be5b49" + sha256: "106842ad6569f0b60297619e9e0b1885c2fb9bf84812935490e6c5275777804e" url: "https://pub.dev" source: hosted - version: "8.1.2" + version: "8.1.4" characters: dependency: transitive description: @@ -37,10 +53,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -61,18 +77,18 @@ packages: dependency: transitive description: name: fetch_api - sha256: "74a1e426d41ed9c89353703b2d80400c5d0ecfa144b2d8a7bd8882fbc9e48787" + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" url: "https://pub.dev" source: hosted - version: "1.0.3" + version: "2.2.0" fetch_client: dependency: transitive description: name: fetch_client - sha256: "83c07b07a63526a43630572c72715707ca113a8aa3459efbc7b2d366b79402af" + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" url: "https://pub.dev" source: 
hosted - version: "1.0.2" + version: "1.1.2" fixnum: dependency: transitive description: @@ -90,26 +106,74 @@ packages: dependency: "direct main" description: name: flutter_bloc - sha256: f0ecf6e6eb955193ca60af2d5ca39565a86b8a142452c5b24d96fb477428f4d2 + sha256: b594505eac31a0518bdcb4b5b79573b8d9117b193cc80cc12e17d639b10aa27a url: "https://pub.dev" source: hosted - version: "8.1.5" + version: "8.1.6" + flutter_markdown: + dependency: "direct main" + description: + name: flutter_markdown + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 + url: "https://pub.dev" + source: hosted + version: "0.7.3+1" freezed_annotation: dependency: transitive description: name: freezed_annotation - sha256: c3fd9336eb55a38cc1bbd79ab17573113a8deccd0ecbbf926cca3c62803b5c2d + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 url: "https://pub.dev" source: hosted - version: "2.4.1" + version: "2.4.4" + gcloud: + dependency: transitive + description: + name: gcloud + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd + url: "https://pub.dev" + source: hosted + version: "0.8.13" + google_generative_ai: + dependency: transitive + description: + name: google_generative_ai + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be + url: "https://pub.dev" + source: hosted + version: "0.4.4" + google_identity_services_web: + dependency: transitive + description: + name: google_identity_services_web + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" + url: "https://pub.dev" + source: hosted + version: "0.3.1+4" + googleapis: + dependency: transitive + description: + name: googleapis + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" + url: "https://pub.dev" + source: hosted + version: "13.2.0" + googleapis_auth: + dependency: transitive + description: + name: googleapis_auth + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 + url: "https://pub.dev" + source: hosted + version: "1.6.0" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -118,43 +182,56 @@ packages: url: "https://pub.dev" source: hosted version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" json_annotation: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" langchain: dependency: "direct main" description: path: "../../packages/langchain" relative: true source: path - version: "0.7.1" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../packages/langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.6" + langchain_google: + dependency: "direct main" + description: + path: "../../packages/langchain_google" + relative: true + source: path + version: "0.6.3+1" + langchain_mistralai: + dependency: "direct main" + description: + path: "../../packages/langchain_mistralai" + 
relative: true + source: path + version: "0.2.3+1" + langchain_ollama: + dependency: "direct main" + description: + path: "../../packages/langchain_ollama" + relative: true + source: path + version: "0.3.2" langchain_openai: dependency: "direct main" description: path: "../../packages/langchain_openai" relative: true source: path - version: "0.6.1" + version: "0.7.2" langchain_tiktoken: dependency: transitive description: @@ -163,6 +240,14 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.1" + markdown: + dependency: transitive + description: + name: markdown + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 + url: "https://pub.dev" + source: hosted + version: "7.2.2" material_color_utilities: dependency: transitive description: @@ -175,10 +260,17 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" + mistralai_dart: + dependency: "direct overridden" + description: + path: "../../packages/mistralai_dart" + relative: true + source: path + version: "0.0.3+3" nested: dependency: transitive description: @@ -187,13 +279,20 @@ packages: url: "https://pub.dev" source: hosted version: "1.0.0" + ollama_dart: + dependency: "direct overridden" + description: + path: "../../packages/ollama_dart" + relative: true + source: path + version: "0.2.2" openai_dart: dependency: "direct overridden" description: path: "../../packages/openai_dart" relative: true source: path - version: "0.3.2" + version: "0.4.2" path: dependency: transitive description: @@ -206,18 +305,26 @@ packages: dependency: transitive description: name: provider - sha256: "9a96a0a19b594dbc5bf0f1f27d2bc67d5f95957359b461cd9feb44ed6ae75096" + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c + url: "https://pub.dev" + source: hosted + version: "6.1.2" + retry: + dependency: transitive + description: + name: retry + sha256: "822e118d5b3aafed083109c72d5f484c6dc66707885e07c0fbcb8b986bba7efc" url: "https://pub.dev" source: hosted - version: "6.1.1" + version: "3.1.2" rxdart: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter @@ -243,10 +350,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -267,10 +374,10 @@ packages: dependency: transitive description: name: uuid - sha256: cd210a09f7c18cbe5a02511718e0334de6559871052c90a90c0cca46a4aa81c8 + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.3.3" + version: "4.4.2" vector_math: dependency: transitive description: @@ -279,14 +386,21 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" + vertex_ai: + dependency: "direct overridden" + description: + path: "../../packages/vertex_ai" + relative: true + source: path + version: "0.1.0+2" web: 
dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.0.0" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=1.16.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.19.0" diff --git a/examples/hello_world_flutter/pubspec.yaml b/examples/hello_world_flutter/pubspec.yaml index c000d972..f9fe1384 100644 --- a/examples/hello_world_flutter/pubspec.yaml +++ b/examples/hello_world_flutter/pubspec.yaml @@ -4,15 +4,19 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: flutter: sdk: flutter equatable: ^2.0.5 - flutter_bloc: ^8.1.5 - langchain: ^0.7.1 - langchain_openai: ^0.6.1 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 + langchain: ^0.7.6 + langchain_google: ^0.6.3+1 + langchain_mistralai: ^0.2.3+1 + langchain_ollama: ^0.3.2 + langchain_openai: ^0.7.2 flutter: uses-material-design: true diff --git a/examples/hello_world_flutter/pubspec_overrides.yaml b/examples/hello_world_flutter/pubspec_overrides.yaml index 93b5421a..5c8d37f9 100644 --- a/examples/hello_world_flutter/pubspec_overrides.yaml +++ b/examples/hello_world_flutter/pubspec_overrides.yaml @@ -1,10 +1,22 @@ -# melos_managed_dependency_overrides: langchain,langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_google,langchain_mistralai,langchain_ollama,langchain_openai,mistralai_dart,ollama_dart,openai_dart,vertex_ai dependency_overrides: langchain: path: ../../packages/langchain langchain_core: path: ../../packages/langchain_core + langchain_google: + path: ../../packages/langchain_google + langchain_mistralai: + path: ../../packages/langchain_mistralai + langchain_ollama: + path: ../../packages/langchain_ollama langchain_openai: path: ../../packages/langchain_openai + mistralai_dart: + path: ../../packages/mistralai_dart + ollama_dart: + path: ../../packages/ollama_dart openai_dart: path: ../../packages/openai_dart + vertex_ai: + path: ../../packages/vertex_ai diff --git a/examples/hello_world_flutter/web/flutter_bootstrap.js b/examples/hello_world_flutter/web/flutter_bootstrap.js new file mode 100644 index 00000000..8ce49d8a --- /dev/null +++ b/examples/hello_world_flutter/web/flutter_bootstrap.js @@ -0,0 +1,12 @@ +{{flutter_js}} +{{flutter_build_config}} + +_flutter.loader.load({ + serviceWorkerSettings: { + serviceWorkerVersion: {{flutter_service_worker_version}}, + }, + onEntrypointLoaded: async function(engineInitializer) { + const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); + await appRunner.runApp(); + }, +}); diff --git a/examples/hello_world_flutter/web/index.html b/examples/hello_world_flutter/web/index.html index add98e6a..68ffe01a 100644 --- a/examples/hello_world_flutter/web/index.html +++ b/examples/hello_world_flutter/web/index.html @@ -1,59 +1,25 @@ - + + + + - This is a placeholder for base href that will be replaced by the value of - the `--base-href` argument provided to `flutter build`. 
- --> - + + - - - - - - - - - - - - - - hello_world_flutter - - - - - + Hello World Flutter + - + diff --git a/examples/hello_world_flutter/web/manifest.json b/examples/hello_world_flutter/web/manifest.json index ab44f4f1..2332c807 100644 --- a/examples/hello_world_flutter/web/manifest.json +++ b/examples/hello_world_flutter/web/manifest.json @@ -1,11 +1,11 @@ { "name": "hello_world_flutter", - "short_name": "hello_world_flutter", + "short_name": "Hello World Flutter", "start_url": ".", "display": "standalone", "background_color": "#0175C2", "theme_color": "#0175C2", - "description": "A new Flutter project.", + "description": "A sample Flutter app integrating LangChain.", "orientation": "portrait-primary", "prefer_related_applications": false, "icons": [ diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.lock b/examples/vertex_ai_matching_engine_setup/pubspec.lock index a29715a0..b3a0f0ae 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.lock +++ b/examples/vertex_ai_matching_engine_setup/pubspec.lock @@ -5,18 +5,18 @@ packages: dependency: transitive description: name: _discoveryapis_commons - sha256: f8bb1fdbd77f3d5c1d62b5b0eca75fbf1e41bf4f6c62628f880582e2182ae45d + sha256: "113c4100b90a5b70a983541782431b82168b3cae166ab130649c36eb3559d498" url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.7" args: dependency: transitive description: name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" url: "https://pub.dev" source: hosted - version: "2.4.2" + version: "2.5.0" async: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" crypto: dependency: transitive description: @@ -45,58 +45,58 @@ packages: dependency: "direct main" description: name: gcloud - sha256: e9501083036d5f94027ce5afddd8ddae9b04121cf2fc6036b2cdd5663b52fca7 + sha256: b8fbff52ff1cfdb2bb3c53eb039c0ee3745618632969b60ec25d55b31fbb36dd url: "https://pub.dev" source: hosted - version: "0.8.12" + version: "0.8.13" google_identity_services_web: dependency: transitive description: name: google_identity_services_web - sha256: "0c56c2c5d60d6dfaf9725f5ad4699f04749fb196ee5a70487a46ef184837ccf6" + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" url: "https://pub.dev" source: hosted - version: "0.3.0+2" + version: "0.3.1+4" googleapis: dependency: transitive description: name: googleapis - sha256: "8a8c311723162af077ca73f94b823b97ff68770d966e29614d20baca9fdb490a" + sha256: "864f222aed3f2ff00b816c675edf00a39e2aaf373d728d8abec30b37bee1a81c" url: "https://pub.dev" source: hosted - version: "12.0.0" + version: "13.2.0" googleapis_auth: dependency: "direct main" description: name: googleapis_auth - sha256: "1401a9e55f9e0f565d3eebb18d990290f53a12d38a5f7f0230b112895778a85b" + sha256: befd71383a955535060acde8792e7efc11d2fccd03dd1d3ec434e85b68775938 url: "https://pub.dev" source: hosted - version: "1.5.1" + version: "1.6.0" http: dependency: "direct main" description: name: http - sha256: d4872660c46d929f6b8a9ef4e7a7eff7e49bbf0c4ec3f385ee32df5119175139 + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.1.2" + 
version: "1.2.2" http_parser: dependency: transitive description: name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" url: "https://pub.dev" source: hosted - version: "4.0.2" + version: "4.1.0" meta: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.15.0" path: dependency: transitive description: @@ -125,10 +125,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" term_glyph: dependency: transitive description: @@ -151,14 +151,14 @@ packages: path: "../../packages/vertex_ai" relative: true source: path - version: "0.1.0" + version: "0.1.0+2" web: dependency: transitive description: name: web - sha256: edc8a9573dd8c5a83a183dae1af2b6fd4131377404706ca4e5420474784906fa + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "1.0.0" sdks: - dart: ">=3.2.0 <4.0.0" + dart: ">=3.4.0 <4.0.0" diff --git a/examples/vertex_ai_matching_engine_setup/pubspec.yaml b/examples/vertex_ai_matching_engine_setup/pubspec.yaml index 34b972bf..c37f6c30 100644 --- a/examples/vertex_ai_matching_engine_setup/pubspec.yaml +++ b/examples/vertex_ai_matching_engine_setup/pubspec.yaml @@ -4,10 +4,10 @@ version: 1.0.0 publish_to: none environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - gcloud: ^0.8.12 - googleapis_auth: ^1.5.1 - http: ^1.1.0 - vertex_ai: ^0.1.0 + gcloud: ^0.8.13 + googleapis_auth: ^1.6.0 + http: ^1.2.2 + vertex_ai: ^0.1.0+2 diff --git a/examples/wikivoyage_eu/.gitignore b/examples/wikivoyage_eu/.gitignore new file mode 100644 index 00000000..3a857904 --- /dev/null +++ b/examples/wikivoyage_eu/.gitignore @@ -0,0 +1,3 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ diff --git a/examples/wikivoyage_eu/README.md b/examples/wikivoyage_eu/README.md new file mode 100644 index 00000000..74bbd8e2 --- /dev/null +++ b/examples/wikivoyage_eu/README.md @@ -0,0 +1,89 @@ +# Wikivoyage EU + +This example demonstrates how to build a fully local Retrieval Augmented Generation (RAG) pipeline with Llama 3 and ObjectBox using LangChain.dart and Ollama. + +> This example is adapted from [Ashmi Banerjee](https://ashmibanerjee.com)'s workshop "[Building a RAG using Google Gemma and MongoDB](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk)". + +![RAG Pipeline](rag.png) +*Figure 1: RAG Architecture (source: [Ashmi Banerjee](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk))* + +## Setup + +### 1. Install Ollama + +- Go to the [Ollama](https://ollama.ai/) website and download the latest version of the Ollama app. + +### 2. 
Download models
+
+- For this example we will be using the following models:
+  * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en)
+  * LLM: [`llama3.2`](https://ollama.com/library/llama3.2)
+- Open your terminal and run:
+```bash
+ollama pull jina/jina-embeddings-v2-small-en
+ollama run llama3.2
+```
+
+### 3. Setup ObjectBox
+
+- We will be using [ObjectBox](https://objectbox.io) for our vector store.
+- In order to use ObjectBox, we need to download the ObjectBox C library. You can find more information on how to do this [here](https://docs.objectbox.io/getting-started).
+```bash
+bash <(curl -s https://raw.githubusercontent.com/objectbox/objectbox-dart/main/install.sh)
+```
+
+### 4. Get dependencies
+
+```bash
+dart pub get
+```
+
+## How it works
+
+The example has two scripts:
+1. `injestion.dart`: This script reads the Wikivoyage dataset, creates embeddings from the data and stores them in the ObjectBox database.
+2. `wikivoyage_eu.dart`: This script implements the chatbot that runs the RAG pipeline.
+
+### Ingestion
+
+We will be using data from [Wikivoyage](https://wikivoyage.org), a freely accessible online travel guide authored by volunteers.
+
+The `wikivoyage_eu_dataset.csv` file contains data from 160 European cities, including the city name, country, coordinates, population and a brief description:
+
+| city      | country     | lat     | lng    | population | abstract |
+|-----------|-------------|---------|--------|------------|----------|
+| Amsterdam | Netherlands | 52.3728 | 4.8936 | 1459402.0  | Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges. |
+
+The script does the following:
+1. It uses LangChain.dart's `CsvLoader` to load the `wikivoyage_eu_dataset.csv` dataset.
+2. It uses the `jina/jina-embeddings-v2-small-en` model to create embeddings for each city's data. The generated embeddings have 512 dimensions.
+
+   *As the data for each city is not very large, we won't be chunking it into smaller parts, but you could easily do that using the `RecursiveCharacterTextSplitter` class.*
+3. It stores the embeddings in the ObjectBox vector database.
+
+You can run the script using:
+```bash
+$ dart run bin/injestion.dart
+Added 160 documents to the vector store.
+```
+
+### Chatbot
+
+The chatbot script implements the RAG pipeline. It does the following:
+1. Takes a user query as input.
+2. Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query.
+3. Retrieves the 5 most similar documents from the ObjectBox database.
+4. Builds a prompt using the retrieved documents and the query.
+5. Uses the `llama3.2` model to generate a response to the prompt.
+
+You can run the script using:
+```bash
+$ dart run bin/wikivoyage_eu.dart
+```
+
+![Wikivoyage EU](wikivoyage_eu.gif)
+
+## Conclusion
+
+This example demonstrates how to build a simple RAG pipeline that can run locally on your machine. You can easily extend this example to build more complex RAG pipelines with more advanced retrieval and generation techniques. Check out the [LangChain.dart](https://langchaindart.dev/) documentation for more information.
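+
+As a starting point for such extensions, here is a minimal sketch (not part of the example's scripts) of how the ingestion step could chunk each city's description with `RecursiveCharacterTextSplitter` before embedding it. The splitter parameters below are illustrative assumptions; adjust `chunkSize` and `chunkOverlap` to your data:
+
+```dart
+// ignore_for_file: avoid_print
+import 'package:langchain/langchain.dart';
+import 'package:langchain_community/langchain_community.dart';
+import 'package:langchain_ollama/langchain_ollama.dart';
+
+void main() async {
+  // Load the same CSV dataset used by injestion.dart.
+  final docs = await CsvLoader('bin/wikivoyage_eu_dataset.csv').load();
+
+  // Split each document into overlapping chunks so that every embedding
+  // covers a focused piece of text (mainly useful for longer documents).
+  final splitter = RecursiveCharacterTextSplitter(
+    chunkSize: 500,
+    chunkOverlap: 50,
+  );
+  final chunks = splitter.splitDocuments(docs);
+
+  // Embed and store the chunks, exactly as the original script does.
+  final embeddings = OllamaEmbeddings(
+    model: 'jina/jina-embeddings-v2-small-en',
+  );
+  final vectorStore = ObjectBoxVectorStore(
+    embeddings: embeddings,
+    dimensions: 512,
+  );
+  final ids = await vectorStore.addDocuments(documents: chunks);
+  print('Added ${ids.length} chunks to the vector store.');
+
+  embeddings.close();
+}
+```
+
+For this 160-row dataset the unchunked ingestion in `injestion.dart` works just as well; chunking becomes relevant once you ingest longer articles.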
+ +For simplicity, this example is a CLI application. However, you can easily adapt this code to work in a Flutter app. To get started with ObjectBox in Flutter, refer to the [ObjectBox documentation](https://docs.objectbox.io/getting-started). diff --git a/examples/wikivoyage_eu/analysis_options.yaml b/examples/wikivoyage_eu/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/examples/wikivoyage_eu/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/examples/wikivoyage_eu/bin/injestion.dart b/examples/wikivoyage_eu/bin/injestion.dart new file mode 100644 index 00000000..6aa7eaa3 --- /dev/null +++ b/examples/wikivoyage_eu/bin/injestion.dart @@ -0,0 +1,21 @@ +// ignore_for_file: avoid_print +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + final loader = CsvLoader('bin/wikivoyage_eu_dataset.csv'); + final docs = await loader.load(); + + final embeddings = OllamaEmbeddings( + model: 'jina/jina-embeddings-v2-small-en', + ); + final vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 512, + ); + + final ids = await vectorStore.addDocuments(documents: docs); + print('Added ${ids.length} documents to the vector store.'); + + embeddings.close(); +} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu.dart b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart new file mode 100644 index 00000000..9fb076eb --- /dev/null +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu.dart @@ -0,0 +1,82 @@ +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; + +void main() async { + final vectorStore = ObjectBoxVectorStore( + embeddings: OllamaEmbeddings( + model: 'jina/jina-embeddings-v2-small-en', + ), + dimensions: 512, + ); + + final retriever = vectorStore.asRetriever( + defaultOptions: VectorStoreRetrieverOptions( + searchType: ObjectBoxSimilaritySearch(k: 5), + ), + ); + final setupAndRetrieval = Runnable.fromMap({ + 'context': retriever.pipe( + Runnable.mapInput( + (docs) => docs.map((d) => d.pageContent).join('\n---\n'), + ), + ), + 'question': Runnable.passthrough(), + }); + + final promptTemplate = ChatPromptTemplate.fromTemplates(const [ + ( + ChatMessageType.system, + ''' +Here is some data from Wikivoyage about travel destinations in Europe: + + +{context} + + +Please read the Wikivoyage data carefully and consider how you can best answer the user's question using only the information provided. + +Use ANSI escape codes instead of Markdown to format your answer. +For example, `\x1B[1m\x1B[0m` will make "" bold. + +If the user's question is not about Europe, just respond with: +"I can only help you with vacation planning in Europe." +Do not provide any other suggestion if the question is not about Europe. +''' + ), + (ChatMessageType.human, '{question}'), + ]); + + final model = ChatOllama( + defaultOptions: const ChatOllamaOptions( + model: 'llama3.2', + ), + ); + const outputParser = StringOutputParser(); + final chain = setupAndRetrieval // + .pipe(promptTemplate) + .pipe(model) + .pipe(outputParser); + + stdout.writeln( + 'Hello! Ask me anything about your vacation plans in Europe, ' + 'and I will provide you with the best itinerary.', + ); + + while (true) { + stdout.write('> '); + final query = stdin.readLineSync() ?? 
''; + + if (query.toLowerCase() == 'q') { + break; + } + + final stream = chain.stream(query); + await stream.forEach(stdout.write); + stdout.write('\n\n'); + } + + chain.close(); +} diff --git a/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv new file mode 100644 index 00000000..0e775870 --- /dev/null +++ b/examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv @@ -0,0 +1,161 @@ +city,country,lat,lng,population,abstract +Aalborg,Denmark,57.05,9.9167,143598.0,"Aalborg is the largest city in North Jutland, Denmark. Its population, as of 2016, is 134,672, making it the fourth largest city in Denmark." +Adana,Turkey,37.0,35.3213,1765981.0,"Adana is a city on the Cilician Plains of central Turkey, on the Seyhan River about 50 km from the Mediterranean coast. It's industrial and mostly modern but with several places of interest in its historic centre." +Amsterdam,Netherlands,52.3728,4.8936,1459402.0,"Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges." +Ancona,Italy,43.6169,13.5167,100924.0,Ancona is the capital of the Italian region called the Marches and an important port city on the coast of the Adriatic Sea. +Ankara,Turkey,39.93,32.85,5503985.0,"Ankara is the capital of Turkey, central within the country on the plateau of Central Anatolia. It's a sprawling modern place around an ancient citadel, and in 2022 had a population of almost 5." +Antalya,Turkey,36.8874,30.7075,2426356.0,"Antalya is a city in Pamphylia on the Turkish Mediterranean coast, and the chief resort of the ""Turkish Riviera"". It's a metropolis with a population of 2." +Arad,Romania,46.175,21.3125,159074.0,There is more than one place in the world called Arad. You might be looking for: +Arkhangelsk,Russia,64.55,40.5333,351488.0,"Arkhangelsk (population 350,000 in 2018) is a regional center in Northwestern Russia, located on both banks of Northern Dvina river near its mouth on the White Sea, about 1250 km by road to the north of Moscow and about 1100 km northeast of Saint Petersburg. It is part of the Silver Ring of cultural and historical centers of Northwestern Russia." +Astrakhan,Russia,46.35,48.035,532504.0,Astrakhan (Russian: А́страхань AH-struh-khun) is a city in Russia. +Baia Mare,Romania,47.6667,23.5833,123738.0,Baia Mare is a city in north-western Romania. +Baku,Azerbaijan,40.3667,49.8352,2300500.0,Baku (Azeri: Bakı) is the capital of Azerbaijan and is the largest city in the Caucasus. Baku's Old Town has UNESCO World Heritage status. +Barcelona,Spain,41.3825,2.1769,4800000.0,"Barcelona is Spain's second largest city, with a population of nearly two million people, and the capital of Catalonia. A major port on the northeastern Mediterranean coast of Spain, Barcelona has a wide variety of attractions that bring in tourists from around the globe." +Bari,Italy,41.1253,16.8667,323370.0,"Bari (Bari dialect: Bare) is the capital of the Apulia region of Italy, on the Adriatic Sea. With a population of 317,000 (in 2019), it's the second largest city in Southern Italy after Naples." +Batman,Turkey,37.887,41.132,447106.0,"Batman (pronounced as baat-maan, not like the name of the superhero; Kurdish: Iluh) is a city in southeastern Anatolia. It is the capital of an important oil producing province." 
+Belgrade,Serbia,44.82,20.46,1378682.0,"Belgrade (Serbian: Београд, Beograd) is the capital of the Republic of Serbia and the country's largest city. Belgrade has been re-emerging as a tourist destination in the past years." +Bergen,Norway,60.3894,5.33,267117.0,"Bergen is Norway's second largest city and the most popular gateway to the fjords of West Norway. The city is renowned for its great location amidst mountains, fjords, and the ocean." +Berlin,Germany,52.52,13.405,4473101.0,"Berlin is Germany's capital and biggest city. Within the city limits, Berlin in 2022 had a population of 3." +Bologna,Italy,44.4939,11.3428,392564.0,"Bologna (Emilian: Bulåggna) is a beautiful and historic city in the Emilia-Romagna region of Northeast Italy. It has the oldest university in the Western world, a lively student population, excellent food, a striking brick terracotta-roofed cityscape, and lots to see and do." +Bordeaux,France,44.84,-0.58,260958.0,"Bordeaux is a city in the Gironde region of southwest France, standing on the River Garonne. It's the country's fifth largest city, with a population of 259,809 in 2020, and another million living in its associated towns." +Braga,Portugal,41.5503,-8.42,181494.0,"Braga is one of the five largest cities of Portugal, situated in the Minho region in the North of the country. It is known for its abundance of churches and thus called the ""city of archbishops""." +Bratislava,Slovakia,48.1439,17.1097,475503.0,"Bratislava (Hungarian: Pozsony, German: Pressburg, known as Prešporok before 1919), is the capital and largest city of Slovakia. It has a population of more than 475,000 (2021), and is the administrative, cultural and economic centre of the country." +Bremen,Germany,53.0833,8.8,566573.0,"The Free Hanseatic City of Bremen is a city in northern Germany with a major port on the River Weser. The population is 567,000 (2020)." +Brest,Belarus,52.1347,23.6569,340723.0,There is more than one place called Brest: +Brno,Czechia,49.1925,16.6083,382405.0,"Brno (pronounced Bruhno) (German: Brünn, Štatl in the local dialect) is the major city of Moravia (a historical region in the Czech Republic). It is the largest city in Moravia and the second-largest city in the Czech Republic by population and area." +Brussels,Belgium,50.8467,4.3525,1743000.0,"Brussels (French: Bruxelles, Dutch: Brussel) is the capital of Belgium and one of the three administrative regions within the country, together with Flanders and Wallonia. Apart from its role within its country, it is also an internationally important city, hosting numerous international institutions, and in particular the headquarters of NATO and the core institutions of the European Union." +Budapest,Hungary,47.4925,19.0514,2997958.0,"Budapest is the capital city of Hungary. With a unique, youthful atmosphere, world-class classical music scene, a pulsating nightlife increasingly appreciated among European youth, and last but not least, an exceptional offer of natural thermal baths, Budapest is one of Europe's most delightful and enjoyable cities." +Burgas,Bulgaria,42.503,27.4702,210813.0,Burgas (also Bourgas) is a city on the Black Sea coast of Bulgaria. It is a large industrial centre with many tourist attractions in the region. +Bursa,Turkey,40.1833,29.05,2901396.0,"Bursa is a large city in the Southern Marmara region of Turkey, 20 km inland from the Marmara coast. It's the country's fourth-largest city, with a population of 2,161,990 in 2021, and with another million living in the wider metro area." 
+Bydgoszcz,Poland,53.1219,18.0003,346739.0,"Bydgoszcz (German: Bromberg) is a major city of 360,000 in Poland and with suburban area the agglomeration has nearly 500,000. It has well preserved 19th-century architecture and was known as Little Berlin before the world wars." +Cagliari,Italy,39.2278,9.1111,154106.0,"Cagliari (Sardinian: Casteddu, ""castle""; Latin: Caralis) is the capital city of the Italian island of Sardinia." +Cheboksary,Russia,56.1333,47.25,489498.0,"Cheboksary (Russian: Чебокса́ры chee-bahk-SAH-ree) is the capital of Chuvashia in the Volga Region of the Russian Federation. About 600,000 people live here and in the nearby satellite city Novocheboksarsk." +Chelyabinsk,Russia,55.15,61.4,1202371.0,"Chelyabinsk (Russian: Челя́бинск cheel-YAH-beensk) is a big city, with more than a million inhabitants, the capital of Chelyabinsk Oblast in the European part of Russia." +Cluj-Napoca,Romania,46.7667,23.5833,324576.0,"Cluj-Napoca (Romanian), Kolozsvár (Hungarian) or Klausenburg (German) is the capital of Cluj county and the unofficial capital of the historical region of Transylvania. The city, with about 320,000 people (2016), is very pleasant, and it is a great experience for those who want to see urban Transylvanian life at its best." +Coimbra,Portugal,40.2111,-8.4292,143396.0,"Coimbra is the traditional capital city of Central Portugal's historic Beira Litoral region. With over 140,000 inhabitants (2021), it is the largest municipality there and one of Portugal's four largest metropolises." +Copenhagen,Denmark,55.6761,12.5683,1366301.0,"Copenhagen (Danish: København) is the capital city of Denmark and forms the moderate conurbation that one million Danes call home. It is big enough to form a small Danish metropolis, with shopping, culture and nightlife par excellence, yet small enough still to feel intimate and be safe." +Cork,Ireland,51.8972,-8.47,222333.0,"Cork is the principal city of County Cork in southwest Ireland. It was already the second-largest city in Ireland when in 2019 its boundaries were extended, to have a population of 210,000." +Craiova,Romania,44.3333,23.8167,269506.0,"Craiova with 306,000 inhabitants (2016), is one of the five largest cities of Romania. Craiova is in the southwestern region of the country and hosts the administrative buildings of the Dolj County and of the Oltenia district." +Debrecen,Hungary,47.53,21.6392,328642.0,[a Nagytemplom télen.jpg|thumb|400px|The Great Church of Debrecen in winter] +Denizli,Turkey,37.7667,29.0833,1027782.0,"Denizli is a city in the Southern Aegean region of Turkey, which most visitors simply transit to reach Pamukkale 20 km north. It's a typical modern Turkish city, far from picturesque, but does have enough sights of its own if your schedule allows." +Dijon,France,47.3167,5.0167,158002.0,"Dijon is the largest city in the eastern French region of Bourgogne-Franche-Comté. Dijon is best known for its mustard (named after the town), which is no longer produced in its metropolitan area, but it is still one of the most beautiful cities in France, and its historic buildings and byways were not heavily damaged by bombing in World War II and are largely intact." +Donetsk,Ukraine,48.0028,37.8053,929063.0,"Donetsk (Ukrainian: Донецьк, Russian: Доне́цк) is a city in the Donetsk People's Republic, on the banks of the River Kalmius." +Dresden,Germany,51.05,13.74,561922.0,"Dresden is the capital of Saxony (Sachsen). 
It's often referred to locally as Elbflorenz, or ""Florence on the Elbe"", reflecting its location on the Elbe river and its historical role as a centre for the arts and beautiful architecture - much like Florence in Italy." +Dublin,Ireland,53.35,-6.2603,1173179.0,"Dublin (Irish: Baile Átha Cliath, ""Town of the Hurdled Ford"") is the capital city of Ireland. Its vibrancy, nightlife and tourist attractions are world renowned and it's the most popular entry point for international visitors to Ireland." +Erfurt,Germany,50.9833,11.0333,213835.0,Erfurt is the capital of the German state of Thuringia (Thüringen). The city is the largest one in that province and likewise a major transportation hub. +Erzincan,Turkey,39.7464,39.4914,157452.0,"Erzincan is a city in Eastern Anatolia. It's modern, on a grid pattern, as its predecessor was destroyed by an earthquake in 1939." +Erzurum,Turkey,39.9086,41.2769,767848.0,"Erzurum is a city in Eastern Anatolia, and is the hub for visiting eastern Turkey." +Gaziantep,Turkey,37.0628,37.3792,2028563.0,"Gaziantep is a city in Southeastern Anatolia. Although it is a major city in Turkey (counting almost 2 million inhabitants) and known as the Turkish capital of gastronomy, it counts very few international tourists." +Geneva,Switzerland,46.2017,6.1469,201818.0,thumb|200px|right|The old town of Geneva in the winter +Hamburg,Germany,53.55,10.0,2484800.0,"The Free and Hanseatic City of Hamburg (Freie und Hansestadt Hamburg) is Germany's second-largest city and, at the same time, one of Germany's 16 federal states or Bundesländer. Prior to the formation of the modern German state, Hamburg for centuries enjoyed a status as de facto independent city state and regional power and trade hub in the North Sea." +Helsinki,Finland,60.1708,24.9375,1268296.0,Helsinki (Helsingfors in Swedish) is Finland's capital and largest city. Helsinki combines modern and historic architectural styles with beautiful open spaces. +Innsbruck,Austria,47.2683,11.3933,132493.0,"Innsbruck is the fifth-largest city in Austria and the provincial capital of Tyrol, as well as one of the largest cities in the Alps. It is in a valley of the river Inn between mountain ranges of above 2000 m above sea level, halfway between Bavaria and northern Italy, and is a hub of a region popular for skiing and other mountain-related activities and a busy tourist destination." +Ioannina,Greece,39.6636,20.8522,113094.0,"Ioannina (Ιωάννινα) (population: 112,486 (2011)) is a beautiful city in Northern Greece whose old town is surrounded by tall defensive walls." +Isparta,Turkey,37.7647,30.5567,258375.0,"Isparta (Greek: Σπάρτη, Baris) is a city of 220,000 inhabitants in the Lakes District of Mediterranean Turkey." +Istanbul,Turkey,41.0136,28.955,16079000.0,"Istanbul (Turkish: İstanbul) is a very large city of fantastic history, culture and beauty. Called Byzantium in ancient times, the city's name was changed to Constantinople in 324 CE when it was rebuilt by the first Christian Roman Emperor, Constantine." +Ivano-Frankivsk,Ukraine,48.9228,24.7106,238196.0,"Ivano-Frankivsk (Ukrainian: Івано-Франківськ, also transliterated Ivano-Frankovsk from Russian: Ивано-Франковск) (formerly in Polish: Stanisławów, German: Stanislau) is a city in the Ukrainian part of East Galicia." 
+Izmir,Turkey,38.42,27.14,4320519.0,"thumb|270px|Clock tower in Konak Square, iconic symbol of the city" +Kahramanmaras,Turkey,37.5833,36.9333,443575.0,"Kahramanmaraş, which used to be known as Maraş, is a city in Turkey, located on the crossroad of southern, eastern and southeastern Turkey." +Kaliningrad,Russia,54.7003,20.4531,475056.0,"Kaliningrad (Russian: Калинингра́д kuh-leen-een-GRAHD) , also known by its original German name, Königsberg, is the capital city of Kaliningrad Oblast in Russia. It has about 475,000 inhabitants (2018)." +Kars,Turkey,40.6078,43.0958,115891.0,"Kars is a city in Eastern Anatolia. It is most frequently visited as a jumping off point for travelers going to Ani, but it is a viable destination in its own right for its 19th-century Russian imperial buildings, and, of course, its role as the setting for Orhan Pamuk's famous novel Snow." +Kaunas,Lithuania,54.8972,23.8861,381007.0,"Kaunas is the second-largest city in Lithuania, with a population of some 288,000 people. The main reason to visit is its charming Old Town, connected to the 19th century New Town ranged along Laisvės alėja." +Kayseri,Turkey,38.7225,35.4875,1389680.0,"Kayseri is a city in Central Anatolia, 350 km southeast of Ankara. In 2021 the population was 1." +Kazan,Russia,55.7964,49.1089,1243500.0,Kazan (Russian: Каза́нь kuh-ZAHN) is the capital of Russia's republic of Tatarstan and the center of the world Tatar culture. +Kharkiv,Ukraine,49.9925,36.2311,1446107.0,"Kharkiv (Ukrainian: Харків, also transliterated Kharkov from Russian: Харьков) is a major city in the Kharkiv region of Ukraine and is the second largest city in Ukraine with a population of over 1.5 million inhabitants." +Kiel,Germany,54.3233,10.1394,246601.0,"Kiel is the capital city of the German state of Schleswig-Holstein and has a population of roughly 248,000 (2018). It is located on the Baltic Sea at the end of the ""Kieler Förde""." +Kirov,Russia,58.6,49.65,501468.0,"Kirov (Russian: Ки́ров KEE-ruhf) is the capital city of Kirov Oblast, Russia." +Klagenfurt,Austria,46.6167,14.3,101403.0,Klagenfurt (Slovenian: Celovec) is the capital of Carinthia in Austria. It was one of the eight host cities in the 2008 European Football Championships. +Konya,Turkey,37.8667,32.4833,2232374.0,"Konya is a city in Central Anatolia in Turkey, known as the city of ""whirling dervishes"" and for its outstanding Seljuk architecture. In 2021 Konya metropolis had a population of 2,277,017, the sixth largest in Turkey, but the area of most interest is compact." +Krasnodar,Russia,45.0333,38.9667,948827.0,"Krasnodar is the capital of Krasnodar Krai in southern Russia, with a popolulation in 2018 of just under 900,000. Its main industries are based on agriculture and food." +Kutaisi,Georgia,42.25,42.7,147900.0,"Kutaisi is a city in the Rioni Region of Georgia. The city itself is very cinematographic and charming, and a visit to Kutaisi is almost mandatory to see the Bagrati Cathedral and Gelati Monastery, which are UNESCO World Heritage sites and offer views from the mountain slopes over the city and the Rioni River." +Lille,France,50.6278,3.0583,234475.0,"Lille (Dutch: Rijsel) is the capital of the Hauts-de-France region in northern France and the core of one of the largest metropolitan agglomerations in the country. Historically, it has also been the capital of Flanders, and later an industrial powerhouse, thanks to which it now boasts a large and handsome historic centre." 
+Ljubljana,Slovenia,46.0514,14.5061,286745.0,"Ljubljana (""lee-oo-blee-AH-nuh"") is the small but delightful capital of Slovenia. While the city's population had grown to 295,500 in 2020, the sights and amenities are concentrated in the charming old centre." +London,United Kingdom,51.5072,-0.1275,11262000.0,"Noisy, vibrant and truly multicultural, London is a megalopolis of people, ideas and frenetic energy. The capital and largest city of the United Kingdom sits on the River Thames in South-East England, Greater London has a population of a little over 9 million." +Luxembourg,Luxembourg,49.6117,6.1319,132780.0,"The Grand Duchy of Luxembourg (Luxembourgish: Groussherzogtum Lëtzebuerg, French: Grand-Duché de Luxembourg, German: Großherzogtum Luxemburg), is a landlocked Benelux country at the crossroads of Germanic and Latin cultures." +Lviv,Ukraine,49.8425,24.0322,724314.0,"Lviv (also spelled L'viv; Ukrainian: Львів; Polish: Lwów, German: Lemberg, Russian: Львов), formerly known as Lvov after its Russian name, is in Western Ukraine and used to be the capital of East Galicia. It's the biggest city of the region and a major Ukrainian cultural centre on the UNESCO World Heritage List." +Lyon,France,45.76,4.84,522969.0,"Lyon is the capital of the French administrative region of Auvergne-Rhône-Alpes. A city of half a million, Lyon alone is the country's third-largest city, but its metropolitan area is only second in population to Paris." +Maastricht,Netherlands,50.85,5.6833,277721.0,"By many considered to be the most beautiful city of the country, Maastricht is the southernmost city in the Netherlands. It's the capital of the province of Limburg and famous for what the Dutch call the ""Burgundian"" way of life." +Madrid,Spain,40.4169,-3.7033,6211000.0,"Madrid is Spain's capital and largest city. A city that has been marked by Spain's varied and tumultuous history, Madrid has some of Europe's most impressive cultural and architectural heritage, which includes grand avenues, plazas, buildings and monuments, world-class art galleries and museums, highly popular football teams, and cultural events of international fame for everyone." +Magdeburg,Germany,52.1317,11.6392,236188.0,"Magdeburg is the capital city of the Bundesland of Saxony-Anhalt, Germany, with a population of 240,000 (2018). Magdeburg has become a modern city with numerous interesting sights of high importance and uniqueness, as well as many parks, which make Magdeburg the third greenest city in Germany." +Malatya,Turkey,38.3486,38.3194,426381.0,thumb|350px|New Mosque at the central square +Milan,Italy,45.4669,9.19,1366180.0,"Milan (Italian: Milano; Milanese: Milan) is financially the most important city in Italy, and home to the Borsa Italiana stock exchange. It is the second most populous city proper in the country, but sits at the centre of Italy's largest urban and metropolitan area." +Minsk,Belarus,53.9,27.5667,2009786.0,"Minsk (Belarusian: Мінск, Russian: Минск) is the capital and largest city of the Republic of Belarus. Its population is about two million people in 2021." +Miskolc,Hungary,48.0833,20.6667,150695.0,"Miskolc, with population of about 157,000 (2017), is the third largest city in Hungary, located in the north-east of the country, east of Bükk mountains." +Moscow,Russia,55.7558,37.6178,17332000.0,"Since its founding in 1147, Moscow (Russian: Москва, Moskva) has been at the crossroads of history as the capital of empires and a frequent target for invaders. 
As the capital of the Russian Empire, the Soviet Union, and, today, the Russian Federation, it has played a central role in the development of the largest country in the world." +Munich,Germany,48.1375,11.575,2606021.0,"Munich (German: München, Bavarian: Minga) is the capital of the federal state of Bavaria in the south of Germany. Within the city limits, Munich in 2021 had a population of just under 1." +Murcia,Spain,37.9861,-1.1303,672773.0,You could be looking for: +Murmansk,Russia,68.9706,33.075,298096.0,"Murmansk (Russian: Му́рманск) is a city in the extreme northwest of Russia and the world's largest city north of the Arctic Circle. It lies in the Kola Bay on the Kola Peninsula, by the Barents Sea." +Mykolaiv,Ukraine,46.975,31.995,498748.0,"Mykolaiv (Ukrainian: Миколаїв, also transliterated Nikolaev or Nikolayev from Russian: Николаев) is a city in Southern Ukraine. It is an important shipbuilding centre and transportation hub for Ukraine, and has a large military presence." +Nalchik,Russia,43.4833,43.6167,265162.0,"Nalchik is the capital city of Kabardino-Balkaria, a republic located in the very south of the Russian Federation." +Nantes,France,47.2181,-1.5528,318808.0,"Nantes (Breton: Naoned) is the capital of Pays de la Loire region in northwest France. Historically it was part of Brittany, whose dukes built up its castle and made the town their capital." +Naples,Italy,40.8333,14.25,966144.0,"Naples (Italian: Napoli; Neapolitan: Napule) in Italy, an ancient port on the Mediterranean sea. With just short of a million citizens, is the third most populous municipality." +Nevsehir,Turkey,38.6264,34.7139,153117.0,"Nevşehir is one of the major cities in Cappadoccia Region, which displays a beautiful combination of nature and history. The traditional main sources of income of the city, carpet weaving and viticulture, have been overtaken by tourism, because of its proximity to the underground shelters, the fairy chimneys, monasteries, caravanserais and the famous rock-hewn churches of Göreme." +Nicosia,Cyprus,35.1725,33.365,330000.0,Nicosia (Greek: Λευκωσία; Turkish: Lefkoşa) is the capital of Cyprus and is the largest city by far. +Novi Sad,Serbia,45.2542,19.8425,380000.0,thumb|right|350px|Freedom square (Trg Slobode) +Oradea,Romania,47.0722,21.9211,196367.0,"Oradea is one the few undiscovered gems of Romania's tourism. Despite being one of the largest and most important cities in Transylvania, and having a high degree of administrative, economic and commercial importance, it is often overlooked by tourists in favor of other Transylvanian cities such as Brasov, Sibiu, Sighisoara or Cluj-Napoca." +Orenburg,Russia,51.7667,55.1,564773.0,"Orenburg (Russian: Оренб'ург, Uh-rehn-BOORK) is the capital of Orenburg Oblast. Every citizen will point you the sign at the bridge across the Ural river, supposedly landmarking the geographical border between Europe and Asia (the actual boundary is further east)." +Pamplona,Spain,42.8167,-1.65,203418.0,"Pamplona (Basque: Iruña) is a city in Navarra, Spain. It is most famous world-wide for its San Fermín festival, held each year from July 6-14." +Paris,France,48.8567,2.3522,11060000.0,thumb|300px|The Eiffel Tower and the river Seine +Penza,Russia,53.2,45.0,523726.0,There's more than one place called Penza: +Perm,Russia,58.0139,56.2489,1048005.0,"Perm (Russian: Пермь p`yehr`m`) is a city in Perm Krai, Russia." +Perugia,Italy,43.1122,12.3889,165683.0,"Perugia is a city in the Italian region of Umbria. 
It has an important university that attracts many foreign students, is a major center of medieval art, has a stunningly beautiful central area and is home of the Umbria Jazz Festival." +Petrozavodsk,Russia,61.7833,34.35,278551.0,thumb|350 px|Old and New Petrozavodsk +Plovdiv,Bulgaria,42.15,24.75,383540.0,thumb|Old Plovdiv +Podgorica,Montenegro,42.4413,19.2629,150977.0,"Podgorica (Montenegrin: Подгорица) is the capital of Montenegro. While not a typical European eye candy, the city is definitely worth visiting, owing to its interesting mix of old and new, its café culture and nightlife, and its laid back Mediterranean atmosphere." +Porto,Portugal,41.1621,-8.622,1278210.0,"Porto is Portugal's second largest city and the capital of the Northern region, and a busy industrial and commercial centre. The city isn't very populous (about 238,000 inhabitants in 2024), but the Porto metropolitan area has some 1." +Prague,Czechia,50.0875,14.4214,1335084.0,"Prague (Czech: Praha) is the capital and largest city of the Czech Republic. The city's historic buildings and narrow, winding streets are testament to its centuries-old role as capital of the historic region of Bohemia." +Pristina,Kosovo,42.6633,21.1622,161751.0,"Pristina (Albanian: Prishtinë, Serbian: Priština), the capital city of Kosovo, is not beautiful: it is messy, with centuries-old Ottoman heritage competing with communist designs and post-communist architectural monstrosities. However, there is a powerful draw to this city of 162,000 people (2011), offering much to passing visitors." +Pskov,Russia,57.8167,28.3333,209840.0,"Pskov is the largest city and administrative capital of Pskov Oblast. One of the oldest cities in the country, it has preserved many unique architectural monuments of the 12th-16th centuries." +Rennes,France,48.1147,-1.6794,220488.0,"Rennes is the chief city of Brittany in northwest France. It's mostly modern and industrial, but has many grand 18th and 19th century buildings, and survivors of earlier times." +Riga,Latvia,56.9489,24.1064,920643.0,"Riga is the financial, creative, and cultural centre of Latvia. It is the capital and the largest city in Latvia, it is also the largest city in the Baltic States." +Rijeka,Croatia,45.3272,14.4411,191293.0,"Rijeka (literally ""River"" in Croatian language) is a city in Kvarner Bay, a northern inlet of the Adriatic Sea in Croatia. It is the principal seaport of the country." +Rivne,Ukraine,50.6192,26.2519,246574.0,"Rivne (Ukrainian: Рівне, also transliterated Rovno from Russian: Ровно) (Polish: Równe) is a city in Western Ukraine." +Rome,Italy,41.8931,12.4828,2872800.0,"Rome (Italian and Latin: Roma), the 'Eternal City', is the capital and largest city of Italy and of the Lazio region. It's the famed city of the Roman Empire, the Seven Hills, La Dolce Vita, the Vatican City and Three Coins in the Fountain." +Rouen,France,49.4428,1.0886,112321.0,"Rouen is the capital of the French region of Upper Normandy on the River Seine, 135 km (approximately 90 minutes drive) northwest from the centre of Paris. The city has a population of 110,000 and its metropolitan area includes some 666,000 inhabitants (2017)." +Saint Petersburg,Russia,59.95,30.3167,5384342.0,"Saint Petersburg (Russian: Са́нкт-Петербу́рг Sankt-Peterburg), known as Petrograd (Петроград) in 1914-1924 and Leningrad (Ленинград) in 1924-1991, is the second largest city of Russia, with 5.6 million inhabitants (2021), and the former capital of the Russian Empire." 
+Salzburg,Austria,47.8,13.045,155021.0,"Salzburg is a city in Austria, near the border with Germany's Bavaria state, with a population of 157,000 (2020). It was the setting for the 1965 movie The Sound of Music, so you may think you know all there is to see in Salzburg if you have seen the movie." +Samara,Russia,53.2028,50.1408,1169719.0,thumb|300px|Iversky Convent +Samsun,Turkey,41.2903,36.3336,1335716.0,"Samsun, in the Central Karadeniz region of Turkey, is the largest city on the Turkish Black Sea coast." +Santander,Spain,43.4628,-3.805,172221.0,"Santander is the capital and largest city of the province of Cantabria in Spain. It's on the north coast, with many beaches, ferries from Britain, and a small historic centre." +Sarajevo,Bosnia and Herzegovina,43.8564,18.4131,419957.0,"Sarajevo is the capital of Bosnia and Herzegovina, and its largest city, with 420,000 citizens in its urban area (2013). Sarajevo metropolitan area that has a population of 555,000 also includes some neighbourhoods of ""East Sarajevo"" that are a part of Republika Srpska." +Saratov,Russia,51.5333,46.0167,845300.0,Saratov (Russian: Сара́тов suh-RAH-tuhf) is a city in the Volga region of Russia. +Satu Mare,Romania,47.79,22.89,102411.0,"Satu Mare is a city in the Maramureș region of Romania. As of 2021, it had a population of 91,520." +Sibiu,Romania,45.7928,24.1519,147245.0,"Sibiu is a town in southern Transylvania, Romania, 280 km by road from Bucharest. The old town centre is very attractive." +Siirt,Turkey,37.925,41.9458,166332.0,Siirt is a city in Southeastern Anatolia. +Simferopol,Ukraine,44.9484,34.1,341799.0,"Simferopol (Russian: Симферополь, Ukrainian: Сімферополь) is the capital city of the Crimea." +Sivas,Turkey,39.75,37.0167,377561.0,"Sivas is a city in Central Anatolia, with a population in 2020 of 335,570. By road it's 450 km east of Ankara, and stands at 1278 m elevation." +Skopje,Macedonia,41.9961,21.4317,640000.0,"Skopje (Macedonian: Скопје, Albanian: Shkup, Turkish: Üsküp) is the capital and largest city of the Republic of North Macedonia. Skopje is city of many cultures and many centuries." +Sofia,Bulgaria,42.7,23.33,1547779.0,Sofia (София) is the capital of Bulgaria. It is also the biggest city in the country with about 2 million citizens (including suburbs). +Stavanger,Norway,58.97,5.7314,237369.0,"Stavanger is Norway's fourth largest city, at 145,000 citizens (2021). It is the largest city in, and the administrative centre of, Rogaland county in West Norway." +Stavropol,Russia,45.05,41.9833,450680.0,Stravropol (Ставрополь) is a city in Russia. +Stockholm,Sweden,59.3294,18.0686,1611776.0,"Stockholm is Sweden's capital and largest city, with nearly a million inhabitants in the city, and 2.4 million within Stockholm County (as of 2021)." +Strasbourg,France,48.5833,7.7458,290576.0,"thumb|300px|Strasbourg railway station, known for the sky dome" +Stuttgart,Germany,48.7775,9.18,2787724.0,"Stuttgart is the capital of the Bundesland of Baden-Württemberg in Germany. With a population of approximately 632,000 in the immediate city (2017) and more than 5." +Syktyvkar,Russia,61.6667,50.8167,245313.0,thumb|300px|Street scene in Syktyvkar. +Szczecin,Poland,53.4325,14.5481,403833.0,"Szczecin, (pronounced Shchetsin, German: Stettin, Latin: Stetinum) is a maritime port city and the capital of Zachodniopomorskie in Poland. The city has a population of over 400,000, with almost 780,000 living in its metro area (2019)." +Tallinn,Estonia,59.4372,24.7453,438341.0,"Tallinn is Estonia's capital and largest city. 
Tallinn is an important port of the Baltic Sea, with the busy passenger section of the port reaching the foothill of the picturesque medieval Old Town, which has been astonishingly well preserved and was inscribed on the UNESCO World Heritage List in 1997." +Tampere,Finland,61.4981,23.76,334112.0,thumb|350px|View to Näsinneula tower in Tampere +Tbilisi,Georgia,41.7225,44.7925,1118035.0,"Tbilisi (Georgian: , Russian: ), is the capital city of the country of Georgia, lying on the banks of the Mtkvari River. The metropolitan area covers 726 km² (280 mi²) and has a population of approximately 1." +Thessaloniki,Greece,40.6403,22.9347,824676.0,"Thessaloniki (Greek: Θεσσαλονίκη, Albanian, Turkish: Selanik, Serbian, Bulgarian, Macedonian: Солун, Solun, Judaeo-Spanish: סאלוניקו / Saloniko, Romanian: Salonic, Aromanian: Sãrunã, French: Salonique) is the capital of the administrative region of Central Macedonia and the whole historical region of Macedonia, Greece, and is, at about one million inhabitants (2011), the second largest city in the country. More importantly, it is a city with a continuous 3,000-year history, preserving relics of its Roman, Byzantine and Ottoman past and of its formerly dominant Jewish population." +Tirana,Albania,41.3289,19.8178,418495.0,"Tirana (Albanian: Tiranë) is the bustling and relatively modernised capital of Albania. It is the most important economic, financial, political and trade centre in the country." +Toulouse,France,43.6045,1.444,493465.0,"Toulouse is the chief city of Haute-Garonne in the Occitanie region of France. It stands north of the Pyrenees on the River Garonne, halfway between the Atlantic and the Mediterranean." +Trabzon,Turkey,41.005,39.7225,426882.0,"Trabzon (formerly Trebizond) is the largest city in the Eastern Karadeniz region of Turkey. Trabzon functioned as an independent state or empire during several periods in its long history, ruling over a vast area from Sinop in the west to Georgia in the east, even including territory in Crimea." +Turku,Finland,60.45,22.2667,252468.0,"Turku (Swedish: Åbo) is Finland's oldest city and the biggest one until the mid 1800s. Believed to have been founded in the early 13th century, it is the cradle of modern Finnish culture and has extensively influenced Finnish history." +Ufa,Russia,54.7261,55.9475,1115560.0,"Ufa (Russian: Уфа́ oo-FAH, Bashkirː ӨФӨ oe-FOE), the capital of Bashkortostan, is a large, interesting, and rapidly developing city, with a population of over 1.1 million in 2018." +Uzhhorod,Ukraine,48.6239,22.295,114897.0,"Uzhhorod (Ukrainian: Ужгород, also transliterated Uzhgorod from Russian: Ужгород; Hungarian: Ungvár, German: Uschhorod) is a city in Western Ukraine, the administrative center of Zakarpatska Oblast (Transcarpthian Region). The population of Uzhhorod is multiethnic." +Valencia,Spain,39.47,-0.3764,792492.0,"Valencia (València in Catalan/Valencian) is a charming old city and the capital of the Valencian Community. With just over 830,000 inhabitants in 2023, it is Spain’s third-largest city and, after Barcelona, the most significant cultural centre along the Spanish Mediterranean coast." +Valladolid,Spain,41.6528,-4.7236,297775.0,You may be looking for: +Van,Turkey,38.4942,43.38,353419.0,"Van (pronounced vahn in Turkish, wahn in Kurdish) is a city in Eastern Anatolia, Turkey. For Turks from the other regions of Turkey, it has a surprising beach resort feel in an area where their country is farthest from the sea." 
+Varna,Bulgaria,43.2167,27.9167,348668.0,"Varna (Варна) is a large city on the Black Sea coast in the northeast of Bulgaria. It's the larger of the country's two major sea ports (the other one is Burgas), and a gateway to the seaside resorts on the northern part of the coast." +Vienna,Austria,48.2083,16.3725,1973403.0,"Vienna (German: Wien; Austro-Bavarian: Wean) is the capital of Austria and by far its most populous city, with an urban population of 2 million and a metropolitan population of 2.9 million (2023)." +Vilnius,Lithuania,54.6872,25.28,708203.0,"Vilnius is the capital and largest city of Lithuania. It has a beautiful baroque Old Town, listed as a , and excellent tourist facilities in all price ranges." +Vinnytsia,Ukraine,49.2333,28.4833,371855.0,"Vinnytsia (Ukrainian: Вінниця, also transliterated Vinnitsa from Russian: Винница) is a city in Central Ukraine, the administrative center of the Vinnytsia region. 267 km southwest of Kyiv, it has been known since the Middle Ages, and is home to a former Soviet Cold War airbase." +Vitoria-Gasteiz,Spain,42.85,-2.6833,253672.0,"Vitoria-Gasteiz (Spanish: Vitoria, Basque: Gasteiz) is in the heart of the Basque Country in Spain. The old town has some of the best preserved medieval streets and plazas in the region and it is one of very few cities with two cathedrals." +Vladikavkaz,Russia,43.04,44.6775,306978.0,Vladikavkaz is the capital city of North Ossetia and a major transit hub for the North Caucasus region. Its position on the Georgian Military Highway makes it a staging post for journeys to both Georgia and South Ossetia. +Volgograd,Russia,48.7086,44.5147,1015586.0,"Volgograd (Russian: Волгогра́д vuhl-gah-GRAHD) is a large city along the west bank of the Volga River in Southern Russia. It used to be known as Stalingrad, a name which the city is still known as on several war-related dates each year (according to local legislation)." +Voronezh,Russia,51.6717,39.2106,1050602.0,[of the Annunciation] +Warsaw,Poland,52.23,21.0111,1860281.0,Warsaw (Polish: Warszawa) is Poland's capital and largest city. Warsaw is a bustling metropolis and one of the European Union's fastest-developing capitals and the Union's ninth most populous urban centre. +Zagreb,Croatia,45.8167,15.9833,809268.0,thumb|350px|right|Ban Jelačić Square +Zaporizhzhia,Ukraine,47.85,35.1175,741717.0,"Zaporizhzhia (Ukrainian: Запоріжжя, also transliterated Zaporozhye from Russian: Запорожье) is a city in Ukraine." +Zaragoza,Spain,41.65,-0.8833,675301.0,"Zaragoza is the capital and largest city of Aragon in Spain, and one of Spain's five largest cities, but it is one of the least known outside of Spain. Founded on the river Ebro during the Roman Empire as Cesaraugusta, Zaragoza now holds a large cultural and architectural heritage attesting to 2,000 years of affluence and importance." +Zurich,Switzerland,47.3744,8.5411,436332.0,"Zurich (German: Zürich, Swiss German: Züri) is the largest city in Switzerland, with a population of some 435,000 (2018) in the city, and 1.3 million (2009) in the metro area." 
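The CSV above is the dataset for the new `wikivoyage_eu` example, whose `pubspec.yaml` further down describes it as a "Wikivoyage EU chatbot using llama3.2 and ObjectBox". As rough orientation only, the following is a minimal, hypothetical sketch of how such a dataset could be indexed and queried with the packages the example depends on (`langchain`, `langchain_community`, `langchain_ollama`); the file name, embedding model, vector dimensions and prompt are assumptions, not the example's actual code.

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // Turn each CSV row (one city per line) into a Document.
  // 'wikivoyage_eu_dataset.csv' is an assumed file name.
  final rows = await File('wikivoyage_eu_dataset.csv').readAsLines();
  final docs = rows
      .skip(1) // skip the header row
      .map((row) => Document(pageContent: row))
      .toList(growable: false);

  // Embed the rows with a local Ollama embedding model and store the vectors
  // in ObjectBox. Model name and dimensions are placeholders and must match.
  final embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-base-en');
  final vectorStore = ObjectBoxVectorStore(
    embeddings: embeddings,
    dimensions: 768,
  );
  await vectorStore.addDocuments(documents: docs);

  // Retrieve the rows most relevant to a question and answer with llama3.2.
  const question = 'Which of these cities hosts a famous jazz festival?';
  final relevantDocs = await vectorStore.asRetriever().invoke(question);
  final context = relevantDocs.map((d) => d.pageContent).join('\n');

  final chat = ChatOllama(
    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),
  );
  final answer = await chat.invoke(
    PromptValue.string('Context:\n$context\n\nQuestion: $question'),
  );
  print(answer.output.content);
}
```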
diff --git a/examples/wikivoyage_eu/pubspec.lock b/examples/wikivoyage_eu/pubspec.lock new file mode 100644 index 00000000..f242c95d --- /dev/null +++ b/examples/wikivoyage_eu/pubspec.lock @@ -0,0 +1,343 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + async: + dependency: transitive + description: + name: async + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + beautiful_soup_dart: + dependency: transitive + description: + name: beautiful_soup_dart + sha256: "57e23946c85776dd9515a4e9a14263fff37dbedbd559bc4412bf565886e12b10" + url: "https://pub.dev" + source: hosted + version: "0.3.0" + characters: + dependency: transitive + description: + name: characters + sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" + url: "https://pub.dev" + source: hosted + version: "1.3.0" + collection: + dependency: transitive + description: + name: collection + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + url: "https://pub.dev" + source: hosted + version: "1.19.0" + cross_file: + dependency: transitive + description: + name: cross_file + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" + url: "https://pub.dev" + source: hosted + version: "0.3.4+2" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" + csslib: + dependency: transitive + description: + name: csslib + sha256: "706b5707578e0c1b4b7550f64078f0a0f19dec3f50a178ffae7006b0a9ca58fb" + url: "https://pub.dev" + source: hosted + version: "1.0.0" + csv: + dependency: transitive + description: + name: csv + sha256: c6aa2679b2a18cb57652920f674488d89712efaf4d3fdf2e537215b35fc19d6c + url: "https://pub.dev" + source: hosted + version: "6.0.0" + fetch_api: + dependency: transitive + description: + name: fetch_api + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + url: "https://pub.dev" + source: hosted + version: "2.2.0" + fetch_client: + dependency: transitive + description: + name: fetch_client + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + url: "https://pub.dev" + source: hosted + version: "1.1.2" + ffi: + dependency: transitive + description: + name: ffi + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" + url: "https://pub.dev" + source: hosted + version: "2.1.3" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + flat_buffers: + dependency: transitive + description: + name: flat_buffers + sha256: "380bdcba5664a718bfd4ea20a45d39e13684f5318fcd8883066a55e21f37f4c3" + url: "https://pub.dev" + source: hosted + version: "23.5.26" + freezed_annotation: + dependency: transitive + description: + name: freezed_annotation + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + url: "https://pub.dev" + source: hosted + version: "2.4.4" + html: + dependency: transitive + description: + name: html + sha256: "3a7812d5bcd2894edf53dfaf8cd640876cf6cef50a8f238745c8b8120ea74d3a" + url: "https://pub.dev" + source: hosted + version: "0.15.4" + http: + dependency: transitive + description: + name: http + sha256: 
b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + url: "https://pub.dev" + source: hosted + version: "1.2.2" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" + url: "https://pub.dev" + source: hosted + version: "4.1.0" + iregexp: + dependency: transitive + description: + name: iregexp + sha256: "143859dcaeecf6f683102786762d70a47ef8441a0d2287a158172d32d38799cf" + url: "https://pub.dev" + source: hosted + version: "0.1.2" + json_annotation: + dependency: transitive + description: + name: json_annotation + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + url: "https://pub.dev" + source: hosted + version: "4.9.0" + json_path: + dependency: transitive + description: + name: json_path + sha256: "7a06bbb1cfad390b20fb7a2ca5e67d9ba59633879c6d71142b80fbf61c3b66f6" + url: "https://pub.dev" + source: hosted + version: "0.7.4" + langchain: + dependency: "direct main" + description: + path: "../../packages/langchain" + relative: true + source: path + version: "0.7.6" + langchain_community: + dependency: "direct main" + description: + path: "../../packages/langchain_community" + relative: true + source: path + version: "0.3.2" + langchain_core: + dependency: "direct overridden" + description: + path: "../../packages/langchain_core" + relative: true + source: path + version: "0.3.6" + langchain_ollama: + dependency: "direct main" + description: + path: "../../packages/langchain_ollama" + relative: true + source: path + version: "0.3.2" + langchain_tiktoken: + dependency: transitive + description: + name: langchain_tiktoken + sha256: c1804f4b3e56574ca67e562305d9f11e3eabe3c8aa87fea8635992f7efc66674 + url: "https://pub.dev" + source: hosted + version: "1.0.1" + math_expressions: + dependency: transitive + description: + name: math_expressions + sha256: e32d803d758ace61cc6c4bdfed1226ff60a6a23646b35685670d28b5616139f8 + url: "https://pub.dev" + source: hosted + version: "2.6.0" + maybe_just_nothing: + dependency: transitive + description: + name: maybe_just_nothing + sha256: "0c06326e26d08f6ed43247404376366dc4d756cef23a4f1db765f546224c35e0" + url: "https://pub.dev" + source: hosted + version: "0.5.3" + meta: + dependency: transitive + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + objectbox: + dependency: transitive + description: + name: objectbox + sha256: "70ff2a7538f6f8bb56136734d574f5bdc1cf29c50cd7207a14ea0c641ecb88ca" + url: "https://pub.dev" + source: hosted + version: "4.0.1" + ollama_dart: + dependency: "direct overridden" + description: + path: "../../packages/ollama_dart" + relative: true + source: path + version: "0.2.2" + path: + dependency: transitive + description: + name: path + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + petitparser: + dependency: transitive + description: + name: petitparser + sha256: c15605cd28af66339f8eb6fbe0e541bfe2d1b72d5825efc6598f3e0a31b9ad27 + url: "https://pub.dev" + source: hosted + version: "6.0.2" + rfc_6901: + dependency: transitive + description: + name: rfc_6901 + sha256: df1bbfa3d023009598f19636d6114c6ac1e0b7bb7bf6a260f0e6e6ce91416820 + url: "https://pub.dev" + source: hosted + version: "0.2.0" + rxdart: + dependency: transitive + description: + name: rxdart + sha256: 
"5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" + url: "https://pub.dev" + source: hosted + version: "0.28.0" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + sprintf: + dependency: transitive + description: + name: sprintf + sha256: "1fc9ffe69d4df602376b52949af107d8f5703b77cda567c4d7d86a0693120f23" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + url: "https://pub.dev" + source: hosted + version: "1.3.0" + tavily_dart: + dependency: "direct overridden" + description: + path: "../../packages/tavily_dart" + relative: true + source: path + version: "0.1.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" + uuid: + dependency: transitive + description: + name: uuid + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" + url: "https://pub.dev" + source: hosted + version: "4.4.2" + vector_math: + dependency: transitive + description: + name: vector_math + sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + web: + dependency: transitive + description: + name: web + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + url: "https://pub.dev" + source: hosted + version: "1.0.0" +sdks: + dart: ">=3.4.0 <4.0.0" diff --git a/examples/wikivoyage_eu/pubspec.yaml b/examples/wikivoyage_eu/pubspec.yaml new file mode 100644 index 00000000..5782a40f --- /dev/null +++ b/examples/wikivoyage_eu/pubspec.yaml @@ -0,0 +1,12 @@ +name: wikivoyage_eu +description: Wikivoyage EU chatbot using llama3.2 and ObjectBox. 
+version: 1.0.0 +publish_to: none + +environment: + sdk: ">=3.4.0 <4.0.0" + +dependencies: + langchain: ^0.7.6 + langchain_ollama: ^0.3.2 + langchain_community: 0.3.2 diff --git a/examples/wikivoyage_eu/pubspec_overrides.yaml b/examples/wikivoyage_eu/pubspec_overrides.yaml new file mode 100644 index 00000000..5814891d --- /dev/null +++ b/examples/wikivoyage_eu/pubspec_overrides.yaml @@ -0,0 +1,14 @@ +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_ollama,ollama_dart,tavily_dart +dependency_overrides: + langchain: + path: ../../packages/langchain + langchain_community: + path: ../../packages/langchain_community + langchain_core: + path: ../../packages/langchain_core + langchain_ollama: + path: ../../packages/langchain_ollama + ollama_dart: + path: ../../packages/ollama_dart + tavily_dart: + path: ../../packages/tavily_dart diff --git a/examples/wikivoyage_eu/rag.png b/examples/wikivoyage_eu/rag.png new file mode 100644 index 00000000..ca46092d Binary files /dev/null and b/examples/wikivoyage_eu/rag.png differ diff --git a/examples/wikivoyage_eu/wikivoyage_eu.gif b/examples/wikivoyage_eu/wikivoyage_eu.gif new file mode 100644 index 00000000..84dd58e2 Binary files /dev/null and b/examples/wikivoyage_eu/wikivoyage_eu.gif differ diff --git a/melos.yaml b/melos.yaml index 51805716..b1835a9d 100644 --- a/melos.yaml +++ b/melos.yaml @@ -14,56 +14,61 @@ command: branch: main changelogs: - path: CHANGELOG.md - description: Check out the #announcements channel in the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details about each release. + description: "📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details." 
packageFilters: no-private: true bootstrap: usePubspecOverrides: true environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: async: ^2.11.0 beautiful_soup_dart: ^0.3.0 characters: ^1.3.0 - collection: '>=1.17.0 <1.19.0' - cross_file: ^0.3.4+1 + collection: ^1.18.0 + cross_file: ^0.3.4+2 crypto: ^3.0.3 csv: ^6.0.0 equatable: ^2.0.5 - fetch_client: ^1.0.2 - firebase_app_check: ^0.2.2+5 - firebase_core: ^2.31.0 - firebase_vertexai: ^0.1.0 - flutter_bloc: ^8.1.5 - flutter_markdown: ^0.6.22 - freezed_annotation: ^2.4.1 - gcloud: ^0.8.12 - google_generative_ai: 0.4.0 - googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - http: ^1.1.0 + fetch_client: ^1.1.2 + firebase_app_check: ^0.3.0 + firebase_auth: ^5.1.0 + firebase_core: ^3.3.0 + firebase_vertexai: ^0.2.2 + flat_buffers: ^23.5.26 + flutter_bloc: ^8.1.6 + flutter_markdown: ^0.7.3 + freezed_annotation: ^2.4.2 + gcloud: ^0.8.13 + google_generative_ai: 0.4.4 + googleapis: ^13.0.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 js: ^0.7.1 - json_annotation: ^4.8.1 - json_path: ^0.7.1 + json_annotation: ^4.9.0 + json_path: ^0.7.4 langchain_tiktoken: ^1.0.1 - math_expressions: ^2.4.0 + math_expressions: ^2.6.0 meta: ^1.11.0 + objectbox: ^4.0.1 pinecone: ^0.7.2 - shared_preferences: ^2.2.2 - shelf: ^1.4.1 + rxdart: ">=0.27.7 <0.29.0" + shared_preferences: ^2.3.0 + shelf: ^1.4.2 shelf_router: ^1.1.4 - supabase: ^2.0.8 - uuid: ^4.3.3 + supabase: ^2.2.7 + uuid: ^4.4.2 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 + objectbox_generator: ^4.0.1 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 scripts: lint: diff --git a/packages/anthropic_sdk_dart/CHANGELOG.md b/packages/anthropic_sdk_dart/CHANGELOG.md new file mode 100644 index 00000000..c9710913 --- /dev/null +++ b/packages/anthropic_sdk_dart/CHANGELOG.md @@ -0,0 +1,18 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.0 + + - **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333)) + - **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909)) + - **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). ([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +## 0.0.1 + + - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). 
([e5412b](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae)) + +## 0.0.1-dev.1 + + - Bootstrap package. diff --git a/packages/anthropic_sdk_dart/LICENSE b/packages/anthropic_sdk_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/anthropic_sdk_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/anthropic_sdk_dart/README.md b/packages/anthropic_sdk_dart/README.md new file mode 100644 index 00000000..dc51d776 --- /dev/null +++ b/packages/anthropic_sdk_dart/README.md @@ -0,0 +1,304 @@ +# Anthropic Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Unofficial Dart client for [Anthropic](https://docs.anthropic.com/en/api) API (aka Claude API). + +## Features + +- Fully type-safe, [documented](https://pub.dev/documentation/anthropic_sdk_dart/latest) and tested +- All platforms supported (including streaming on web) +- Custom base URL, headers and query params support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) + +**Supported endpoints:** + +- Messages (with tools and streaming support) + +## Table of contents + +- [Usage](#usage) + * [Authentication](#authentication) + * [Messages](#messages) + * [Tool use](#tool-use) +- [Advance Usage](#advance-usage) + * [Default HTTP client](#default-http-client) + * [Custom HTTP client](#custom-http-client) + * [Using a proxy](#using-a-proxy) + + [HTTP proxy](#http-proxy) + + [SOCKS5 proxy](#socks5-proxy) +- [Acknowledgements](#acknowledgements) +- [License](#license) + +## Usage + +Refer to the [documentation](https://docs.anthropic.com) for more information about the API. + +### Authentication + +The Anthropic API uses API keys for authentication. Visit the [Anthropic console](https://console.anthropic.com/settings/keys) to retrieve the API key you'll use in your requests. 
+ +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. + +```dart +final apiKey = Platform.environment['ANTHROPIC_API_KEY']; +final client = AnthropicClient(apiKey: apiKey); +``` + +### Messages + +Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. + +**Create a Message:** + +```dart +final res = await client.createMessage( + request: CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), +); +print(res.content.text); +// Hello! It's nice to meet you. How are you doing today? +``` + +`Model` is a sealed class that offers two ways to specify the model: +- `Model.modelId('model-id')`: the model ID as string (e.g. `'claude-instant-1.2'`). +- `Model.model(Models.claude35Sonnet20240620)`: a value from `Models` enum which lists all the available models. + +Mind that this list may not be up-to-date. Refer to the [documentation](https://docs.anthropic.com/en/docs/models-overview) for the updated list. + +**Streaming messages:** + +```dart +final stream = client.createMessageStream( + request: CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), +); +await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent e) {}, + messageDelta: (MessageDeltaEvent e) {}, + messageStop: (MessageStopEvent e) {}, + contentBlockStart: (ContentBlockStartEvent e) {}, + contentBlockDelta: (ContentBlockDeltaEvent e) { + stdout.write(e.delta.text); + }, + contentBlockStop: (ContentBlockStopEvent e) {}, + ping: (PingEvent e) {}, + ); +} +// Hello! It's nice to meet you. How are you doing today? +``` + +### Tool use + +Claude is capable of interacting with external client-side tools and functions, allowing you to equip Claude with your own custom tools to perform a wider variety of tasks. + +Refer to the [official documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information. + +In the following example, we want the model to be able to use our function that return the current weather in a given city: + +```dart +Map _getCurrentWeather( + final String location, + final String unit, +) { + const temperature = 22; + const weather = 'Sunny'; + return { + 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, + 'unit': unit, + 'description': weather, + }; +} +``` + +To do that, we need to provide the definition of the tool: +```dart +const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); +``` + +Then we can use the tool in the message request: +```dart +final request1 = CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, +); +final aiMessage1 = await client.createMessage(request: request1); + +final toolUse = aiMessage1.content.blocks.firstOrNull; +if (toolUse == null || toolUse is! ToolUseBlock) { + return; +} + +// Call your tool here with the given input +final toolResult = _getCurrentWeather( + toolUse.input['location'], + toolUse.input['unit'], +); + +final request2 = CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(json.encode(toolResult)), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, +); +final aiMessage2 = await client.createMessage(request: request2); + +print(aiMessage2.content.text); +// Based on the current weather information for Boston, here's what it's like right now: +// +// The temperature in Boston is 71.6°F (Fahrenheit). +// The weather conditions are described as sunny. +``` + +You can also stream the input for a tool: + +```dart +final stream = client.createMessageStream(request: request); +await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent v) {}, + messageDelta: (MessageDeltaEvent v) {}, + messageStop: (MessageStopEvent v) {}, + contentBlockStart: (ContentBlockStartEvent v) {}, + contentBlockDelta: (ContentBlockDeltaEvent v) { + stdout.write(v.delta.inputJson); + }, + contentBlockStop: (ContentBlockStopEvent v) {}, + ping: (PingEvent v) {}, + ); +} +// {"location": "Boston, MA", "unit": "fahrenheit"} +``` + +## Advance Usage + +### Default HTTP client + +By default, the client uses `https://api.anthropic.com/v1` as the `baseUrl` and the following implementations of `http.Client`: + +- Non-web: [`IOClient`](https://pub.dev/documentation/http/latest/io_client/IOClient-class.html) +- Web: [`FetchClient`](https://pub.dev/documentation/fetch_client/latest/fetch_client/FetchClient-class.html) (to support streaming on web) + +### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = AnthropicClient( + apiKey: 'MISTRAL_API_KEY', + client: MyHttpClient(), +); +``` + +### Using a proxy + +#### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = AnthropicClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. 
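As an illustration of that hook (not part of the original README), here is a minimal sketch of a pass-through client that logs every outgoing request before delegating to the default implementation; the class name and logging format are arbitrary:

```dart
import 'package:http/http.dart' as http;

/// Hypothetical example: log each outgoing request, then delegate.
class LoggingClient extends http.BaseClient {
  LoggingClient([http.Client? inner]) : _inner = inner ?? http.Client();

  final http.Client _inner;

  @override
  Future<http.StreamedResponse> send(http.BaseRequest request) {
    print('--> ${request.method} ${request.url}');
    return _inner.send(request);
  }
}

// Usage (replace the placeholder with a real API key):
// final client = AnthropicClient(apiKey: '...', client: LoggingClient());
```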
+ +#### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseHttpClient); + +final client = AnthropicClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. + +## License + +Anthropic Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/anthropic_sdk_dart/analysis_options.yaml b/packages/anthropic_sdk_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/anthropic_sdk_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/anthropic_sdk_dart/build.yaml b/packages/anthropic_sdk_dart/build.yaml new file mode 100644 index 00000000..dee719ac --- /dev/null +++ b/packages/anthropic_sdk_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + - unnecessary_null_checks + json_serializable: + options: + explicit_to_json: true diff --git a/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart new file mode 100644 index 00000000..0a576196 --- /dev/null +++ b/packages/anthropic_sdk_dart/example/anthropic_sdk_dart_example.dart @@ -0,0 +1,200 @@ +// ignore_for_file: avoid_print +import 'dart:async'; +import 'dart:convert'; +import 'dart:io'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; + +Future<void> main() async { + final client = AnthropicClient( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + ); + + await _createMessage(client); + await _createMessageStream(client); + await _toolUse(client); + await _toolUseStreaming(client); + + client.endSession(); +} + +Future<void> _createMessage(final AnthropicClient client) async { + final res = await client.createMessage( + request: const CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), + ); + print(res.content.text); + // Hello! It's nice to meet you. How are you doing today? +} + +Future<void> _createMessageStream(final AnthropicClient client) async { + final stream = client.createMessageStream( + request: const CreateMessageRequest( + model: Model.model(Models.claude35Sonnet20240620), + maxTokens: 1024, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text('Hello, Claude'), + ), + ], + ), + ); + await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent e) {}, + messageDelta: (MessageDeltaEvent e) {}, + messageStop: (MessageStopEvent e) {}, + contentBlockStart: (ContentBlockStartEvent e) {}, + contentBlockDelta: (ContentBlockDeltaEvent e) { + stdout.write(e.delta.text); + }, + contentBlockStop: (ContentBlockStopEvent e) {}, + ping: (PingEvent e) {}, + ); + } + // Hello! It's nice to meet you. How are you doing today?
+} + +Future _toolUse(final AnthropicClient client) async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + + final aiMessage1 = await client.createMessage(request: request1); + + final toolUse = aiMessage1.content.blocks.firstOrNull; + if (toolUse == null || toolUse is! ToolUseBlock) { + return; + } + + // Call your tool here with the given input + final toolResult = _getCurrentWeather( + toolUse.input['location'], + toolUse.input['unit'], + ); + + final request2 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(json.encode(toolResult)), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, + ); + final aiMessage2 = await client.createMessage(request: request2); + print(aiMessage2.content.text); + // Based on the current weather information for Boston, here's what it's like right now: + // + // The temperature in Boston is 71.6°F (Fahrenheit). + // The weather conditions are described as sunny. +} + +Future _toolUseStreaming(final AnthropicClient client) async { + final request = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Fahrenheit?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + + final stream = client.createMessageStream(request: request); + await for (final res in stream) { + res.map( + messageStart: (MessageStartEvent v) {}, + messageDelta: (MessageDeltaEvent v) {}, + messageStop: (MessageStopEvent v) {}, + contentBlockStart: (ContentBlockStartEvent v) {}, + contentBlockDelta: (ContentBlockDeltaEvent v) { + stdout.write(v.delta.inputJson); + }, + contentBlockStop: (ContentBlockStopEvent v) {}, + ping: (PingEvent v) {}, + ); + } + // {"location": "Boston, MA", "unit": "fahrenheit"} +} + +Map _getCurrentWeather( + final String location, + final String unit, +) { + const temperature = 22; + const weather = 'Sunny'; + return { + 'temperature': unit == 'celsius' ? temperature : (temperature * 9 / 5) + 32, + 'unit': unit, + 'description': weather, + }; +} + +const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); diff --git a/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart new file mode 100644 index 00000000..4cc40a27 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/anthropic_sdk_dart.dart @@ -0,0 +1,7 @@ +/// Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). +library; + +export 'src/client.dart'; +export 'src/extensions.dart'; +export 'src/generated/client.dart' show AnthropicClientException; +export 'src/generated/schema/schema.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/client.dart b/packages/anthropic_sdk_dart/lib/src/client.dart new file mode 100644 index 00000000..3d02a34b --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/client.dart @@ -0,0 +1,104 @@ +// ignore_for_file: use_super_parameters +import 'dart:async'; +import 'dart:convert'; + +import 'package:http/http.dart' as http; + +import 'generated/client.dart' as g; +import 'generated/schema/schema.dart'; +import 'http_client/http_client.dart'; + +/// Client for Anthropic API. +/// +/// Please see https://docs.anthropic.com/en/api for more details. +class AnthropicClient extends g.AnthropicClient { + /// Create a new Anthropic API client. + /// + /// Main configuration options: + /// - `apiKey`: your Anthropic API key. You can find your API key in the + /// [Anthropic console](https://console.anthropic.com/settings/keys). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to `https://api.anthropic.com/v1`. + /// You can override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + AnthropicClient({ + final String? apiKey, + final String? baseUrl, + final Map? headers, + final Map? queryParams, + final http.Client? client, + }) : super( + apiKey: apiKey ?? '', + baseUrl: baseUrl, + headers: { + 'anthropic-version': '2023-06-01', + ...?headers, + }, + queryParams: queryParams ?? const {}, + client: client ?? createDefaultHttpClient(), + ); + + // ------------------------------------------ + // METHOD: createMessageStream + // ------------------------------------------ + + /// Create a Message + /// + /// Send a structured list of input messages with text and/or image content, and the + /// model will generate the next message in the conversation. + /// + /// The Messages API can be used for either single queries or stateless multi-turn + /// conversations. + /// + /// `request`: The request parameters for creating a message. 
+ /// + /// `POST` `https://api.anthropic.com/v1/messages` + Stream createMessageStream({ + required final CreateMessageRequest request, + }) async* { + final r = await makeRequestStream( + baseUrl: 'https://api.anthropic.com/v1', + path: '/messages', + method: g.HttpMethod.post, + requestType: 'application/json', + responseType: 'application/json', + body: request.copyWith(stream: true), + headerParams: { + if (apiKey.isNotEmpty) 'x-api-key': apiKey, + }, + ); + yield* r.stream + .transform(const _AnthropicStreamTransformer()) // + .map( + (final d) { + final j = json.decode(d) as Map; + return MessageStreamEvent.fromJson(j); + }, + ); + } + + @override + Future onRequest(final http.BaseRequest request) { + return onRequestHandler(request); + } +} + +class _AnthropicStreamTransformer + extends StreamTransformerBase, String> { + const _AnthropicStreamTransformer(); + + @override + Stream bind(final Stream> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()) // + .where((final i) => i.startsWith('data: ')) + .map((final item) => item.substring(6)); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/extensions.dart b/packages/anthropic_sdk_dart/lib/src/extensions.dart new file mode 100644 index 00000000..ddafbab9 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/extensions.dart @@ -0,0 +1,92 @@ +import 'generated/schema/schema.dart'; + +/// Extension methods for [MessageContent]. +extension MessageContentX on MessageContent { + /// Returns the text content of the message. + String get text { + return map( + text: (text) => text.value, + blocks: (blocks) => + blocks.value.whereType().map((t) => t.text).join('\n'), + ); + } + + /// Returns the blocks of the message. + List get blocks { + return map( + text: (text) => [Block.text(text: text.value)], + blocks: (blocks) => blocks.value, + ); + } +} + +/// Extension methods for [ToolResultBlockContent]. +extension ToolResultBlockContentX on ToolResultBlockContent { + /// Returns the text content of the tool result block content. + String get text { + return map( + text: (ToolResultBlockContentText t) => t.value, + blocks: (b) => + b.value.whereType().map((t) => t.text).join('\n'), + ); + } + + /// Returns the blocks of the tool result block content. + List get blocks { + return map( + text: (t) => [Block.text(text: t.value)], + blocks: (b) => b.value, + ); + } +} + +/// Extension methods for [Block]. +extension BlockX on Block { + /// Returns the text content of the block. + String get text { + return mapOrNull( + text: (text) => text.text, + ) ?? + ''; + } + + /// Returns the image source of the block. + ImageBlock? get image { + return mapOrNull( + image: (image) => image, + ); + } + + /// Returns the tool use block. + ToolUseBlock? get toolUse { + return mapOrNull( + toolUse: (toolUse) => toolUse, + ); + } + + /// Returns the tool result block. + ToolResultBlock? get toolResult { + return mapOrNull( + toolResult: (toolResult) => toolResult, + ); + } +} + +/// Extension methods for [BlockDelta]. +extension BlockDeltaX on BlockDelta { + /// Returns the text content of the block delta. + String get text { + return map( + textDelta: (text) => text.text, + inputJsonDelta: (inputJson) => '', + ); + } + + /// Returns the type of the block delta. + String get inputJson { + return map( + textDelta: (text) => '', + inputJsonDelta: (inputJson) => inputJson.partialJson ?? 
'', + ); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/client.dart b/packages/anthropic_sdk_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..0f3e82a8 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/client.dart @@ -0,0 +1,395 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target, unused_import + +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; +import 'package:meta/meta.dart'; + +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: AnthropicClientException +// ========================================== + +/// HTTP exception handler for AnthropicClient +class AnthropicClientException implements Exception { + AnthropicClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + final String message; + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'AnthropicClientException($s)'; + } +} + +// ========================================== +// CLASS: AnthropicClient +// ========================================== + +/// Client for Anthropic API (v.1) +/// +/// API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details. +class AnthropicClient { + /// Creates a new AnthropicClient instance. + /// + /// - [AnthropicClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [AnthropicClient.headers] Global headers to be sent with every request + /// - [AnthropicClient.queryParams] Global query parameters to be sent with every request + /// - [AnthropicClient.client] Override HTTP client to use for requests + AnthropicClient({ + this.apiKey = '', + this.baseUrl, + this.headers = const {}, + this.queryParams = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? 
baseUrl; + + /// Global headers to be sent with every request + final Map headers; + + /// Global query parameters to be sent with every request + final Map queryParams; + + /// HTTP client for requests + final http.Client client; + + /// Authentication related variables + final String apiKey; + + // ------------------------------------------ + // METHOD: endSession + // ------------------------------------------ + + /// Close the HTTP client and end session + void endSession() => client.close(); + + // ------------------------------------------ + // METHOD: onRequest + // ------------------------------------------ + + /// Middleware for HTTP requests (user can override) + /// + /// The request can be of type [http.Request] or [http.MultipartRequest] + Future onRequest(http.BaseRequest request) { + return Future.value(request); + } + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: onResponse + // ------------------------------------------ + + /// Middleware for HTTP responses (user can override) + Future onResponse(http.Response response) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: _jsonDecode + // ------------------------------------------ + + dynamic _jsonDecode(http.Response r) { + return json.decode(utf8.decode(r.bodyBytes)); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request method + @protected + Future _request({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Add global query parameters + queryParams = {...queryParams, ...this.queryParams}; + + // Ensure query parameters are strings or iterable of strings + queryParams = queryParams.map((key, value) { + if (value is Iterable) { + return MapEntry(key, value.map((v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (queryParams.isNotEmpty) { + uri = uri.replace(queryParameters: queryParams); + } + + // Build the headers + Map headers = {...headerParams}; + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + http.BaseRequest request; + if (isMultipart) { + // Handle multipart request + request = http.MultipartRequest(method.name, uri); + request = request as http.MultipartRequest; + if (body is List) { + request.files.addAll(body); + } else { + request.files.add(body as http.MultipartFile); + } + } else { + // Handle normal request + request = http.Request(method.name, uri); + request = request as http.Request; + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request); + + // Submit request + return await client.send(request); + } + + // ------------------------------------------ + // METHOD: makeRequestStream + // ------------------------------------------ + + /// Reusable request stream method + @protected + Future makeRequestStream({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); + late http.StreamedResponse response; + try { + response = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } + + // ------------------------------------------ + // METHOD: makeRequest + // ------------------------------------------ + + /// Reusable request method + @protected + Future makeRequest({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); + late http.Response response; + try { + final streamedResponse = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + response = await http.Response.fromStream(streamedResponse); + // Handle user response middleware + response = await onResponse(response); + } catch (e) { + // Handle request and response errors + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw AnthropicClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: response.body, + ); + } + + // ------------------------------------------ + // METHOD: createMessage + // ------------------------------------------ + + /// Create a Message + /// + /// Send a structured list of input messages with text and/or image content, and the + /// model will generate the next message in the conversation. + /// + /// The Messages API can be used for either single queries or stateless multi-turn + /// conversations. + /// + /// `request`: The request parameters for creating a message. 
+ /// + /// `POST` `https://api.anthropic.com/v1/messages` + Future createMessage({ + required CreateMessageRequest request, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.anthropic.com/v1', + path: '/messages', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + headerParams: { + if (apiKey.isNotEmpty) 'x-api-key': apiKey, + }, + ); + return Message.fromJson(_jsonDecode(r)); + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart new file mode 100644 index 00000000..e15126a3 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block.dart @@ -0,0 +1,155 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Block +// ========================================== + +/// A block of content in a message. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class Block with _$Block { + const Block._(); + + // ------------------------------------------ + // UNION: TextBlock + // ------------------------------------------ + + /// A block of text content. + const factory Block.text({ + /// The text content. + required String text, + + /// The type of content block. + @Default('text') String type, + }) = TextBlock; + + // ------------------------------------------ + // UNION: ImageBlock + // ------------------------------------------ + + /// A block of image content. + const factory Block.image({ + /// The source of an image block. + required ImageBlockSource source, + + /// The type of content block. + @Default('image') String type, + }) = ImageBlock; + + // ------------------------------------------ + // UNION: ToolUseBlock + // ------------------------------------------ + + /// The tool the model wants to use. + const factory Block.toolUse({ + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + required String id, + + /// The name of the tool being used. + required String name, + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + required Map input, + + /// The type of content block. + @Default('tool_use') String type, + }) = ToolUseBlock; + + // ------------------------------------------ + // UNION: ToolResultBlock + // ------------------------------------------ + + /// The result of using a tool. + const factory Block.toolResult({ + /// The `id` of the tool use request this is a result for. + @JsonKey(name: 'tool_use_id') required String toolUseId, + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @_ToolResultBlockContentConverter() required ToolResultBlockContent content, + + /// Set to `true` if the tool execution resulted in an error. + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + + /// The type of content block. 
+ @Default('tool_result') String type, + }) = ToolResultBlock; + + /// Object construction from a JSON representation + factory Block.fromJson(Map json) => _$BlockFromJson(json); +} + +// ========================================== +// ENUM: BlockEnumType +// ========================================== + +enum BlockEnumType { + @JsonValue('text') + text, + @JsonValue('image') + image, + @JsonValue('tool_use') + toolUse, + @JsonValue('tool_result') + toolResult, +} + +// ========================================== +// CLASS: ToolResultBlockContent +// ========================================== + +/// The result of the tool, as a string (e.g. `"content": "15 degrees"`) +/// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). +/// These content blocks can use the text or image types. +@freezed +sealed class ToolResultBlockContent with _$ToolResultBlockContent { + const ToolResultBlockContent._(); + + /// An array of content blocks. + const factory ToolResultBlockContent.blocks( + List value, + ) = ToolResultBlockContentBlocks; + + /// A single text block. + const factory ToolResultBlockContent.text( + String value, + ) = ToolResultBlockContentText; + + /// Object construction from a JSON representation + factory ToolResultBlockContent.fromJson(Map json) => + _$ToolResultBlockContentFromJson(json); +} + +/// Custom JSON converter for [ToolResultBlockContent] +class _ToolResultBlockContentConverter + implements JsonConverter { + const _ToolResultBlockContentConverter(); + + @override + ToolResultBlockContent fromJson(Object? data) { + if (data is List && data.every((item) => item is Map)) { + return ToolResultBlockContentBlocks(data + .map((i) => Block.fromJson(i as Map)) + .toList(growable: false)); + } + if (data is String) { + return ToolResultBlockContentText(data); + } + throw Exception( + 'Unexpected value for ToolResultBlockContent: $data', + ); + } + + @override + Object? toJson(ToolResultBlockContent data) { + return switch (data) { + ToolResultBlockContentBlocks(value: final v) => v, + ToolResultBlockContentText(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart new file mode 100644 index 00000000..d107a864 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/block_delta.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: BlockDelta +// ========================================== + +/// A delta in a streaming message. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class BlockDelta with _$BlockDelta { + const BlockDelta._(); + + // ------------------------------------------ + // UNION: TextBlockDelta + // ------------------------------------------ + + /// A delta in a streaming text block. + const factory BlockDelta.textDelta({ + /// The text delta. + required String text, + + /// The type of content block. + required String type, + }) = TextBlockDelta; + + // ------------------------------------------ + // UNION: InputJsonBlockDelta + // ------------------------------------------ + + /// A delta in a streaming input JSON. + const factory BlockDelta.inputJsonDelta({ + /// The partial JSON delta. + @JsonKey(name: 'partial_json', includeIfNull: false) String? 
partialJson, + + /// The type of content block. + required String type, + }) = InputJsonBlockDelta; + + /// Object construction from a JSON representation + factory BlockDelta.fromJson(Map json) => + _$BlockDeltaFromJson(json); +} + +// ========================================== +// ENUM: BlockDeltaEnumType +// ========================================== + +enum BlockDeltaEnumType { + @JsonValue('text_delta') + textDelta, + @JsonValue('input_json_delta') + inputJsonDelta, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart new file mode 100644 index 00000000..e310adff --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request.dart @@ -0,0 +1,380 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: CreateMessageRequest +// ========================================== + +/// The request parameters for creating a message. +@freezed +class CreateMessageRequest with _$CreateMessageRequest { + const CreateMessageRequest._(); + + /// Factory constructor for CreateMessageRequest + const factory CreateMessageRequest({ + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() required Model model, + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. 
The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + required List messages, + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') required int maxTokens, + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) String? system, + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) double? temperature, + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. 
+ /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) ToolChoice? toolChoice, + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) List? tools, + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. 
+ @Default(false) bool stream, + }) = _CreateMessageRequest; + + /// Object construction from a JSON representation + factory CreateMessageRequest.fromJson(Map json) => + _$CreateMessageRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'messages', + 'max_tokens', + 'metadata', + 'stop_sequences', + 'system', + 'temperature', + 'tool_choice', + 'tools', + 'top_k', + 'top_p', + 'stream' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'messages': messages, + 'max_tokens': maxTokens, + 'metadata': metadata, + 'stop_sequences': stopSequences, + 'system': system, + 'temperature': temperature, + 'tool_choice': toolChoice, + 'tools': tools, + 'top_k': topK, + 'top_p': topP, + 'stream': stream, + }; + } +} + +// ========================================== +// ENUM: Models +// ========================================== + +/// Available models. Mind that the list may not be exhaustive nor up-to-date. +enum Models { + @JsonValue('claude-3-5-sonnet-20240620') + claude35Sonnet20240620, + @JsonValue('claude-3-haiku-20240307') + claude3Haiku20240307, + @JsonValue('claude-3-opus-20240229') + claude3Opus20240229, + @JsonValue('claude-3-sonnet-20240229') + claude3Sonnet20240229, + @JsonValue('claude-2.0') + claude20, + @JsonValue('claude-2.1') + claude21, + @JsonValue('claude-instant-1.2') + claudeInstant12, +} + +// ========================================== +// CLASS: Model +// ========================================== + +/// The model that will complete your prompt. +/// +/// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional +/// details and options. +@freezed +sealed class Model with _$Model { + const Model._(); + + /// Available models. Mind that the list may not be exhaustive nor up-to-date. + const factory Model.model( + Models value, + ) = ModelCatalog; + + /// The ID of the model to use for this request. + const factory Model.modelId( + String value, + ) = ModelId; + + /// Object construction from a JSON representation + factory Model.fromJson(Map json) => _$ModelFromJson(json); +} + +/// Custom JSON converter for [Model] +class _ModelConverter implements JsonConverter { + const _ModelConverter(); + + @override + Model fromJson(Object? data) { + if (data is String && _$ModelsEnumMap.values.contains(data)) { + return ModelCatalog( + _$ModelsEnumMap.keys.elementAt( + _$ModelsEnumMap.values.toList().indexOf(data), + ), + ); + } + if (data is String) { + return ModelId(data); + } + throw Exception( + 'Unexpected value for Model: $data', + ); + } + + @override + Object? 
toJson(Model data) { + return switch (data) { + ModelCatalog(value: final v) => _$ModelsEnumMap[v]!, + ModelId(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart new file mode 100644 index 00000000..bf588756 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/create_message_request_metadata.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: CreateMessageRequestMetadata +// ========================================== + +/// An object describing metadata about the request. +@freezed +class CreateMessageRequestMetadata with _$CreateMessageRequestMetadata { + const CreateMessageRequestMetadata._(); + + /// Factory constructor for CreateMessageRequestMetadata + const factory CreateMessageRequestMetadata({ + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) String? userId, + }) = _CreateMessageRequestMetadata; + + /// Object construction from a JSON representation + factory CreateMessageRequestMetadata.fromJson(Map json) => + _$CreateMessageRequestMetadataFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['user_id']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'user_id': userId, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart new file mode 100644 index 00000000..e0a89687 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/image_block_source.dart @@ -0,0 +1,74 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: ImageBlockSource +// ========================================== + +/// The source of an image block. +@freezed +class ImageBlockSource with _$ImageBlockSource { + const ImageBlockSource._(); + + /// Factory constructor for ImageBlockSource + const factory ImageBlockSource({ + /// The base64-encoded image data. + required String data, + + /// The media type of the image. + @JsonKey(name: 'media_type') required ImageBlockSourceMediaType mediaType, + + /// The type of image source. + required ImageBlockSourceType type, + }) = _ImageBlockSource; + + /// Object construction from a JSON representation + factory ImageBlockSource.fromJson(Map json) => + _$ImageBlockSourceFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['data', 'media_type', 'type']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'data': data, + 'media_type': mediaType, + 'type': type, + }; + } +} + +// ========================================== +// ENUM: ImageBlockSourceMediaType +// ========================================== + +/// The media type of the image. +enum ImageBlockSourceMediaType { + @JsonValue('image/jpeg') + imageJpeg, + @JsonValue('image/png') + imagePng, + @JsonValue('image/gif') + imageGif, + @JsonValue('image/webp') + imageWebp, +} + +// ========================================== +// ENUM: ImageBlockSourceType +// ========================================== + +/// The type of image source. +enum ImageBlockSourceType { + @JsonValue('base64') + base64, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart new file mode 100644 index 00000000..2444ac92 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message.dart @@ -0,0 +1,162 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Message +// ========================================== + +/// A message in a chat conversation. +@freezed +class Message with _$Message { + const Message._(); + + /// Factory constructor for Message + const factory Message({ + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) String? id, + + /// The content of the message. + @_MessageContentConverter() required MessageContent content, + + /// The role of the messages author. + required MessageRole role, + + /// The model that handled the request. + @JsonKey(includeIfNull: false) String? model, + + /// The reason that we stopped. + /// + /// This may be one of the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + StopReason? stopReason, + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) String? type, + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude.
+ @JsonKey(includeIfNull: false) Usage? usage, + }) = _Message; + + /// Object construction from a JSON representation + factory Message.fromJson(Map json) => + _$MessageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'content', + 'role', + 'model', + 'stop_reason', + 'stop_sequence', + 'type', + 'usage' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'content': content, + 'role': role, + 'model': model, + 'stop_reason': stopReason, + 'stop_sequence': stopSequence, + 'type': type, + 'usage': usage, + }; + } +} + +// ========================================== +// CLASS: MessageContent +// ========================================== + +/// The content of the message. +@freezed +sealed class MessageContent with _$MessageContent { + const MessageContent._(); + + /// An array of content blocks. + const factory MessageContent.blocks( + List value, + ) = MessageContentBlocks; + + /// A single text block. + const factory MessageContent.text( + String value, + ) = MessageContentText; + + /// Object construction from a JSON representation + factory MessageContent.fromJson(Map json) => + _$MessageContentFromJson(json); +} + +/// Custom JSON converter for [MessageContent] +class _MessageContentConverter + implements JsonConverter { + const _MessageContentConverter(); + + @override + MessageContent fromJson(Object? data) { + if (data is List && data.every((item) => item is Map)) { + return MessageContentBlocks(data + .map((i) => Block.fromJson(i as Map)) + .toList(growable: false)); + } + if (data is String) { + return MessageContentText(data); + } + throw Exception( + 'Unexpected value for MessageContent: $data', + ); + } + + @override + Object? toJson(MessageContent data) { + return switch (data) { + MessageContentBlocks(value: final v) => v, + MessageContentText(value: final v) => v, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart new file mode 100644 index 00000000..aa23db40 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta.dart @@ -0,0 +1,61 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageDelta +// ========================================== + +/// A delta in a streaming message. +@freezed +class MessageDelta with _$MessageDelta { + const MessageDelta._(); + + /// Factory constructor for MessageDelta + const factory MessageDelta({ + /// The reason that we stopped. + /// + /// This may be one of the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + StopReason? stopReason, + + /// Which custom stop sequence was generated, if any.
+ /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) String? stopSequence, + }) = _MessageDelta; + + /// Object construction from a JSON representation + factory MessageDelta.fromJson(Map json) => + _$MessageDeltaFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['stop_reason', 'stop_sequence']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'stop_reason': stopReason, + 'stop_sequence': stopSequence, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart new file mode 100644 index 00000000..3ce710cc --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_delta_usage.dart @@ -0,0 +1,51 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageDeltaUsage +// ========================================== + +/// Billing and rate-limit usage. +/// +/// Anthropic's API bills and rate-limits by token counts, as tokens represent the +/// underlying cost to our systems. +/// +/// Under the hood, the API transforms requests into a format suitable for the +/// model. The model's output then goes through a parsing stage before becoming an +/// API response. As a result, the token counts in `usage` will not match one-to-one +/// with the exact visible content of an API request or response. +/// +/// For example, `output_tokens` will be non-zero, even for an empty string response +/// from Claude. +@freezed +class MessageDeltaUsage with _$MessageDeltaUsage { + const MessageDeltaUsage._(); + + /// Factory constructor for MessageDeltaUsage + const factory MessageDeltaUsage({ + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') required int outputTokens, + }) = _MessageDeltaUsage; + + /// Object construction from a JSON representation + factory MessageDeltaUsage.fromJson(Map json) => + _$MessageDeltaUsageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['output_tokens']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'output_tokens': outputTokens, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart new file mode 100644 index 00000000..e502789a --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_role.dart @@ -0,0 +1,17 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: MessageRole +// ========================================== + +/// The role of the messages author. 
+enum MessageRole { + @JsonValue('user') + user, + @JsonValue('assistant') + assistant, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart new file mode 100644 index 00000000..46ef88ba --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event.dart @@ -0,0 +1,126 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: MessageStreamEvent +// ========================================== + +/// An event in a streaming conversation. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class MessageStreamEvent with _$MessageStreamEvent { + const MessageStreamEvent._(); + + // ------------------------------------------ + // UNION: MessageStartEvent + // ------------------------------------------ + + /// A start event in a streaming conversation. + const factory MessageStreamEvent.messageStart({ + /// A message in a chat conversation. + required Message message, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = MessageStartEvent; + + // ------------------------------------------ + // UNION: MessageDeltaEvent + // ------------------------------------------ + + /// A delta event in a streaming conversation. + const factory MessageStreamEvent.messageDelta({ + /// A delta in a streaming message. + required MessageDelta delta, + + /// The type of a streaming event. + required MessageStreamEventType type, + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + required MessageDeltaUsage usage, + }) = MessageDeltaEvent; + + // ------------------------------------------ + // UNION: MessageStopEvent + // ------------------------------------------ + + /// A stop event in a streaming conversation. + const factory MessageStreamEvent.messageStop({ + /// The type of a streaming event. + required MessageStreamEventType type, + }) = MessageStopEvent; + + // ------------------------------------------ + // UNION: ContentBlockStartEvent + // ------------------------------------------ + + /// A start event in a streaming content block. + const factory MessageStreamEvent.contentBlockStart({ + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @JsonKey(name: 'content_block') required Block contentBlock, + + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockStartEvent; + + // ------------------------------------------ + // UNION: ContentBlockDeltaEvent + // ------------------------------------------ + + /// A delta event in a streaming content block.
+ const factory MessageStreamEvent.contentBlockDelta({ + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + required BlockDelta delta, + + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockDeltaEvent; + + // ------------------------------------------ + // UNION: ContentBlockStopEvent + // ------------------------------------------ + + /// A stop event in a streaming content block. + const factory MessageStreamEvent.contentBlockStop({ + /// The index of the content block. + required int index, + + /// The type of a streaming event. + required MessageStreamEventType type, + }) = ContentBlockStopEvent; + + // ------------------------------------------ + // UNION: PingEvent + // ------------------------------------------ + + /// A ping event in a streaming conversation. + const factory MessageStreamEvent.ping({ + /// The type of a streaming event. + required MessageStreamEventType type, + }) = PingEvent; + + /// Object construction from a JSON representation + factory MessageStreamEvent.fromJson(Map json) => + _$MessageStreamEventFromJson(json); +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart new file mode 100644 index 00000000..0e6aa425 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/message_stream_event_type.dart @@ -0,0 +1,27 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: MessageStreamEventType +// ========================================== + +/// The type of a streaming event. 
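+ ///
+ /// Editor's sketch (not generated docs) of handling these event types on the
+ /// client side. It assumes the events come from some streaming call (e.g. a
+ /// `createMessageStream` method, which is not part of this diff) that yields
+ /// `MessageStreamEvent` values:
+ ///
+ /// ```dart
+ /// Future<String> collectText(Stream<MessageStreamEvent> events) async {
+ ///   final buffer = StringBuffer();
+ ///   await for (final event in events) {
+ ///     switch (event) {
+ ///       // Incremental text tokens arrive as content_block_delta events.
+ ///       case ContentBlockDeltaEvent(delta: TextBlockDelta(text: final text)):
+ ///         buffer.write(text);
+ ///       // Everything else (message_start/delta/stop, block start/stop, ping)
+ ///       // carries no text and is skipped here.
+ ///       default:
+ ///         break;
+ ///     }
+ ///   }
+ ///   return buffer.toString();
+ /// }
+ /// ```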
+enum MessageStreamEventType { + @JsonValue('message_start') + messageStart, + @JsonValue('message_delta') + messageDelta, + @JsonValue('message_stop') + messageStop, + @JsonValue('content_block_start') + contentBlockStart, + @JsonValue('content_block_delta') + contentBlockDelta, + @JsonValue('content_block_stop') + contentBlockStop, + @JsonValue('ping') + ping, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..b9d2ef26 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,28 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library anthropic_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'create_message_request.dart'; +part 'create_message_request_metadata.dart'; +part 'tool_choice.dart'; +part 'tool_choice_type.dart'; +part 'message.dart'; +part 'message_role.dart'; +part 'tool.dart'; +part 'image_block_source.dart'; +part 'stop_reason.dart'; +part 'usage.dart'; +part 'message_stream_event_type.dart'; +part 'message_delta.dart'; +part 'message_delta_usage.dart'; +part 'block.dart'; +part 'message_stream_event.dart'; +part 'block_delta.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..4045606f --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,7758 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); + +CreateMessageRequest _$CreateMessageRequestFromJson(Map json) { + return _CreateMessageRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateMessageRequest { + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() + Model get model => throw _privateConstructorUsedError; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. 
+ /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + List get messages => throw _privateConstructorUsedError; + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') + int get maxTokens => throw _privateConstructorUsedError; + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) + CreateMessageRequestMetadata? get metadata => + throw _privateConstructorUsedError; + + /// Custom text sequences that will cause the model to stop generating. 
+ /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences => throw _privateConstructorUsedError; + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) + String? get system => throw _privateConstructorUsedError; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) + double? get temperature => throw _privateConstructorUsedError; + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? get toolChoice => throw _privateConstructorUsedError; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) + List? get tools => throw _privateConstructorUsedError; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) + int? get topK => throw _privateConstructorUsedError; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + bool get stream => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateMessageRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateMessageRequestCopyWith<$Res> { + factory $CreateMessageRequestCopyWith(CreateMessageRequest value, + $Res Function(CreateMessageRequest) then) = + _$CreateMessageRequestCopyWithImpl<$Res, CreateMessageRequest>; + @useResult + $Res call( + {@_ModelConverter() Model model, + List messages, + @JsonKey(name: 'max_tokens') int maxTokens, + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + @JsonKey(includeIfNull: false) String? system, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) List? tools, + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) double? 
topP, + bool stream}); + + $ModelCopyWith<$Res> get model; + $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; + $ToolChoiceCopyWith<$Res>? get toolChoice; +} + +/// @nodoc +class _$CreateMessageRequestCopyWithImpl<$Res, + $Val extends CreateMessageRequest> + implements $CreateMessageRequestCopyWith<$Res> { + _$CreateMessageRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? maxTokens = null, + Object? metadata = freezed, + Object? stopSequences = freezed, + Object? system = freezed, + Object? temperature = freezed, + Object? toolChoice = freezed, + Object? tools = freezed, + Object? topK = freezed, + Object? topP = freezed, + Object? stream = null, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as Model, + messages: null == messages + ? _value.messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + maxTokens: null == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as CreateMessageRequestMetadata?, + stopSequences: freezed == stopSequences + ? _value.stopSequences + : stopSequences // ignore: cast_nullable_to_non_nullable + as List?, + system: freezed == system + ? _value.system + : system // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + toolChoice: freezed == toolChoice + ? _value.toolChoice + : toolChoice // ignore: cast_nullable_to_non_nullable + as ToolChoice?, + tools: freezed == tools + ? _value.tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, + topK: freezed == topK + ? _value.topK + : topK // ignore: cast_nullable_to_non_nullable + as int?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModelCopyWith<$Res> get model { + return $ModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $CreateMessageRequestMetadataCopyWith<$Res>? get metadata { + if (_value.metadata == null) { + return null; + } + + return $CreateMessageRequestMetadataCopyWith<$Res>(_value.metadata!, + (value) { + return _then(_value.copyWith(metadata: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $ToolChoiceCopyWith<$Res>? 
get toolChoice { + if (_value.toolChoice == null) { + return null; + } + + return $ToolChoiceCopyWith<$Res>(_value.toolChoice!, (value) { + return _then(_value.copyWith(toolChoice: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateMessageRequestImplCopyWith<$Res> + implements $CreateMessageRequestCopyWith<$Res> { + factory _$$CreateMessageRequestImplCopyWith(_$CreateMessageRequestImpl value, + $Res Function(_$CreateMessageRequestImpl) then) = + __$$CreateMessageRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_ModelConverter() Model model, + List messages, + @JsonKey(name: 'max_tokens') int maxTokens, + @JsonKey(includeIfNull: false) CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? stopSequences, + @JsonKey(includeIfNull: false) String? system, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) List? tools, + @JsonKey(name: 'top_k', includeIfNull: false) int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + bool stream}); + + @override + $ModelCopyWith<$Res> get model; + @override + $CreateMessageRequestMetadataCopyWith<$Res>? get metadata; + @override + $ToolChoiceCopyWith<$Res>? get toolChoice; +} + +/// @nodoc +class __$$CreateMessageRequestImplCopyWithImpl<$Res> + extends _$CreateMessageRequestCopyWithImpl<$Res, _$CreateMessageRequestImpl> + implements _$$CreateMessageRequestImplCopyWith<$Res> { + __$$CreateMessageRequestImplCopyWithImpl(_$CreateMessageRequestImpl _value, + $Res Function(_$CreateMessageRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? maxTokens = null, + Object? metadata = freezed, + Object? stopSequences = freezed, + Object? system = freezed, + Object? temperature = freezed, + Object? toolChoice = freezed, + Object? tools = freezed, + Object? topK = freezed, + Object? topP = freezed, + Object? stream = null, + }) { + return _then(_$CreateMessageRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as Model, + messages: null == messages + ? _value._messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + maxTokens: null == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as CreateMessageRequestMetadata?, + stopSequences: freezed == stopSequences + ? _value._stopSequences + : stopSequences // ignore: cast_nullable_to_non_nullable + as List?, + system: freezed == system + ? _value.system + : system // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + toolChoice: freezed == toolChoice + ? _value.toolChoice + : toolChoice // ignore: cast_nullable_to_non_nullable + as ToolChoice?, + tools: freezed == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, + topK: freezed == topK + ? _value.topK + : topK // ignore: cast_nullable_to_non_nullable + as int?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + stream: null == stream + ? 
_value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateMessageRequestImpl extends _CreateMessageRequest { + const _$CreateMessageRequestImpl( + {@_ModelConverter() required this.model, + required final List messages, + @JsonKey(name: 'max_tokens') required this.maxTokens, + @JsonKey(includeIfNull: false) this.metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + final List? stopSequences, + @JsonKey(includeIfNull: false) this.system, + @JsonKey(includeIfNull: false) this.temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(includeIfNull: false) final List? tools, + @JsonKey(name: 'top_k', includeIfNull: false) this.topK, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP, + this.stream = false}) + : _messages = messages, + _stopSequences = stopSequences, + _tools = tools, + super._(); + + factory _$CreateMessageRequestImpl.fromJson(Map json) => + _$$CreateMessageRequestImplFromJson(json); + + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @override + @_ModelConverter() + final Model model; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. 
The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + final List _messages; + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. 
The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + @override + List get messages { + if (_messages is EqualUnmodifiableListView) return _messages; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_messages); + } + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @override + @JsonKey(name: 'max_tokens') + final int maxTokens; + + /// An object describing metadata about the request. + @override + @JsonKey(includeIfNull: false) + final CreateMessageRequestMetadata? metadata; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + final List? _stopSequences; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @override + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences { + final value = _stopSequences; + if (value == null) return null; + if (_stopSequences is EqualUnmodifiableListView) return _stopSequences; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// System prompt. 
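For illustration only, not part of the generated schema: a minimal sketch of building the request this class describes, using the `system`, `stop_sequences` and `max_tokens` fields documented here. It assumes the public freezed constructors mirror the private ones in this file (`CreateMessageRequest(...)`, `Message(...)`, `MessageContent.text(...)`, `MessageRole.user`) and the usual `anthropic_sdk_dart` export; verify every name against the package's public API.

```dart
// Illustrative sketch only; constructor and enum names are assumed from the
// freezed definitions in this file and may differ in the public API.
import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; // assumed export

CreateMessageRequest buildExampleRequest() {
  return CreateMessageRequest(
    // ModelId is the raw-string variant of the Model union defined below.
    model: const ModelId('claude-3-5-sonnet-20240620'),
    maxTokens: 1024,
    system: 'You are a terse assistant. Answer in one short sentence.',
    stopSequences: const ['###'],
    messages: [
      Message(
        role: MessageRole.user, // assumed enum member name
        content: MessageContent.text('Hello, Claude'), // assumed union factory
      ),
    ],
  );
}
```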
+ /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @override + @JsonKey(includeIfNull: false) + final String? system; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @override + @JsonKey(name: 'tool_choice', includeIfNull: false) + final ToolChoice? toolChoice; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + final List? 
_tools; + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @override + @JsonKey(includeIfNull: false) + List? get tools { + final value = _tools; + if (value == null) return null; + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @override + @JsonKey(name: 'top_k', includeIfNull: false) + final int? topK; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// Whether to incrementally stream the response using server-sent events. 
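Because the generated `copyWith` above takes each sampling field as an independent named parameter, tuned variants of a request can be derived without mutation. A hedged sketch, reusing the assumed import from the previous example:

```dart
// Sketch: derive tuned copies of a request via the generated copyWith.
// The docs above recommend altering either `temperature` or `topP`, not both.
CreateMessageRequest forAnalysis(CreateMessageRequest request) =>
    request.copyWith(temperature: 0.0);

CreateMessageRequest forStreaming(CreateMessageRequest request) =>
    request.copyWith(stream: true);
```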
+ /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + @override + @JsonKey() + final bool stream; + + @override + String toString() { + return 'CreateMessageRequest(model: $model, messages: $messages, maxTokens: $maxTokens, metadata: $metadata, stopSequences: $stopSequences, system: $system, temperature: $temperature, toolChoice: $toolChoice, tools: $tools, topK: $topK, topP: $topP, stream: $stream)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateMessageRequestImpl && + (identical(other.model, model) || other.model == model) && + const DeepCollectionEquality().equals(other._messages, _messages) && + (identical(other.maxTokens, maxTokens) || + other.maxTokens == maxTokens) && + (identical(other.metadata, metadata) || + other.metadata == metadata) && + const DeepCollectionEquality() + .equals(other._stopSequences, _stopSequences) && + (identical(other.system, system) || other.system == system) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.toolChoice, toolChoice) || + other.toolChoice == toolChoice) && + const DeepCollectionEquality().equals(other._tools, _tools) && + (identical(other.topK, topK) || other.topK == topK) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.stream, stream) || other.stream == stream)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + model, + const DeepCollectionEquality().hash(_messages), + maxTokens, + metadata, + const DeepCollectionEquality().hash(_stopSequences), + system, + temperature, + toolChoice, + const DeepCollectionEquality().hash(_tools), + topK, + topP, + stream); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> + get copyWith => + __$$CreateMessageRequestImplCopyWithImpl<_$CreateMessageRequestImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateMessageRequestImplToJson( + this, + ); + } +} + +abstract class _CreateMessageRequest extends CreateMessageRequest { + const factory _CreateMessageRequest( + {@_ModelConverter() required final Model model, + required final List messages, + @JsonKey(name: 'max_tokens') required final int maxTokens, + @JsonKey(includeIfNull: false) + final CreateMessageRequestMetadata? metadata, + @JsonKey(name: 'stop_sequences', includeIfNull: false) + final List? stopSequences, + @JsonKey(includeIfNull: false) final String? system, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'tool_choice', includeIfNull: false) + final ToolChoice? toolChoice, + @JsonKey(includeIfNull: false) final List? tools, + @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + final bool stream}) = _$CreateMessageRequestImpl; + const _CreateMessageRequest._() : super._(); + + factory _CreateMessageRequest.fromJson(Map json) = + _$CreateMessageRequestImpl.fromJson; + + @override + + /// The model that will complete your prompt. + /// + /// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + /// details and options. + @_ModelConverter() + Model get model; + @override + + /// Input messages. + /// + /// Our models are trained to operate on alternating `user` and `assistant` + /// conversational turns. 
When creating a new `Message`, you specify the prior + /// conversational turns with the `messages` parameter, and the model then generates + /// the next `Message` in the conversation. + /// + /// Each input message must be an object with a `role` and `content`. You can + /// specify a single `user`-role message, or you can include multiple `user` and + /// `assistant` messages. The first message must always use the `user` role. + /// + /// If the final message uses the `assistant` role, the response content will + /// continue immediately from the content in that message. This can be used to + /// constrain part of the model's response. + /// + /// See [message content](https://docs.anthropic.com/en/api/messages-content) for + /// details on how to construct valid message objects. + /// + /// Example with a single `user` message: + /// + /// ```json + /// [{ "role": "user", "content": "Hello, Claude" }] + /// ``` + /// + /// Example with multiple conversational turns: + /// + /// ```json + /// [ + /// { "role": "user", "content": "Hello there." }, + /// { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + /// { "role": "user", "content": "Can you explain LLMs in plain English?" } + /// ] + /// ``` + /// + /// Example with a partially-filled response from Claude: + /// + /// ```json + /// [ + /// { + /// "role": "user", + /// "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + /// }, + /// { "role": "assistant", "content": "The best answer is (" } + /// ] + /// ``` + /// + /// Each input message `content` may be either a single `string` or an array of + /// content blocks, where each block has a specific `type`. Using a `string` for + /// `content` is shorthand for an array of one content block of type `"text"`. The + /// following input messages are equivalent: + /// + /// ```json + /// { "role": "user", "content": "Hello, Claude" } + /// ``` + /// + /// ```json + /// { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + /// ``` + /// + /// Starting with Claude 3 models, you can also send image content blocks: + /// + /// ```json + /// { + /// "role": "user", + /// "content": [ + /// { + /// "type": "image", + /// "source": { + /// "type": "base64", + /// "media_type": "image/jpeg", + /// "data": "/9j/4AAQSkZJRg..." + /// } + /// }, + /// { "type": "text", "text": "What is in this image?" } + /// ] + /// } + /// ``` + /// + /// We currently support the `base64` source type for images, and the `image/jpeg`, + /// `image/png`, `image/gif`, and `image/webp` media types. + /// + /// See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + /// input examples. + /// + /// Note that if you want to include a + /// [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + /// the top-level `system` parameter — there is no `"system"` role for input + /// messages in the Messages API. + List get messages; + @override + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + @JsonKey(name: 'max_tokens') + int get maxTokens; + @override + + /// An object describing metadata about the request. + @JsonKey(includeIfNull: false) + CreateMessageRequestMetadata? 
get metadata; + @override + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Our models will normally stop when they have naturally completed their turn, + /// which will result in a response `stop_reason` of `"end_turn"`. + /// + /// If you want the model to stop generating when it encounters custom strings of + /// text, you can use the `stop_sequences` parameter. If the model encounters one of + /// the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + /// and the response `stop_sequence` value will contain the matched stop sequence. + @JsonKey(name: 'stop_sequences', includeIfNull: false) + List? get stopSequences; + @override + + /// System prompt. + /// + /// A system prompt is a way of providing context and instructions to Claude, such + /// as specifying a particular goal or role. See our + /// [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + @JsonKey(includeIfNull: false) + String? get system; + @override + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + @JsonKey(includeIfNull: false) + double? get temperature; + @override + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @JsonKey(name: 'tool_choice', includeIfNull: false) + ToolChoice? get toolChoice; + @override + + /// Definitions of tools that the model may use. + /// + /// If you include `tools` in your API request, the model may return `tool_use` + /// content blocks that represent the model's use of those tools. You can then run + /// those tools using the tool input generated by the model and then optionally + /// return results back to the model using `tool_result` content blocks. + /// + /// Each tool definition includes: + /// + /// - `name`: Name of the tool. + /// - `description`: Optional, but strongly-recommended description of the tool. + /// - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + /// shape that the model will produce in `tool_use` output content blocks. + /// + /// For example, if you defined `tools` as: + /// + /// ```json + /// [ + /// { + /// "name": "get_stock_price", + /// "description": "Get the current stock price for a given ticker symbol.", + /// "input_schema": { + /// "type": "object", + /// "properties": { + /// "ticker": { + /// "type": "string", + /// "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
+ /// } + /// }, + /// "required": ["ticker"] + /// } + /// } + /// ] + /// ``` + /// + /// And then asked the model "What's the S&P 500 at today?", the model might produce + /// `tool_use` content blocks in the response like this: + /// + /// ```json + /// [ + /// { + /// "type": "tool_use", + /// "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "name": "get_stock_price", + /// "input": { "ticker": "^GSPC" } + /// } + /// ] + /// ``` + /// + /// You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + /// input, and return the following back to the model in a subsequent `user` + /// message: + /// + /// ```json + /// [ + /// { + /// "type": "tool_result", + /// "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + /// "content": "259.75 USD" + /// } + /// ] + /// ``` + /// + /// Tools can be used for workflows that include running client-side tools and + /// functions, or more generally whenever you want the model to produce a particular + /// JSON structure of output. + /// + /// See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + @JsonKey(includeIfNull: false) + List? get tools; + @override + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_k', includeIfNull: false) + int? get topK; + @override + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + + /// Whether to incrementally stream the response using server-sent events. + /// + /// See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + /// details. + bool get stream; + @override + @JsonKey(ignore: true) + _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Model _$ModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'model': + return ModelCatalog.fromJson(json); + case 'modelId': + return ModelId.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'Model', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$Model { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? modelId, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelCopyWith<$Res> { + factory $ModelCopyWith(Model value, $Res Function(Model) then) = + _$ModelCopyWithImpl<$Res, Model>; +} + +/// @nodoc +class _$ModelCopyWithImpl<$Res, $Val extends Model> + implements $ModelCopyWith<$Res> { + _$ModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ModelCatalogImplCopyWith<$Res> { + factory _$$ModelCatalogImplCopyWith( + _$ModelCatalogImpl value, $Res Function(_$ModelCatalogImpl) then) = + __$$ModelCatalogImplCopyWithImpl<$Res>; + @useResult + $Res call({Models value}); +} + +/// @nodoc +class __$$ModelCatalogImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelCatalogImpl> + implements _$$ModelCatalogImplCopyWith<$Res> { + __$$ModelCatalogImplCopyWithImpl( + _$ModelCatalogImpl _value, $Res Function(_$ModelCatalogImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ModelCatalogImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as Models, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelCatalogImpl extends ModelCatalog { + const _$ModelCatalogImpl(this.value, {final String? $type}) + : $type = $type ?? 'model', + super._(); + + factory _$ModelCatalogImpl.fromJson(Map json) => + _$$ModelCatalogImplFromJson(json); + + @override + final Models value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'Model.model(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelCatalogImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => + __$$ModelCatalogImplCopyWithImpl<_$ModelCatalogImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) { + return model(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) { + return model?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) { + if (model != null) { + return model(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, + }) { + return model(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, + }) { + return model?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? modelId, + required TResult orElse(), + }) { + if (model != null) { + return model(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModelCatalogImplToJson( + this, + ); + } +} + +abstract class ModelCatalog extends Model { + const factory ModelCatalog(final Models value) = _$ModelCatalogImpl; + const ModelCatalog._() : super._(); + + factory ModelCatalog.fromJson(Map json) = + _$ModelCatalogImpl.fromJson; + + @override + Models get value; + @JsonKey(ignore: true) + _$$ModelCatalogImplCopyWith<_$ModelCatalogImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ModelIdImplCopyWith<$Res> { + factory _$$ModelIdImplCopyWith( + _$ModelIdImpl value, $Res Function(_$ModelIdImpl) then) = + __$$ModelIdImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ModelIdImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelIdImpl> + implements _$$ModelIdImplCopyWith<$Res> { + __$$ModelIdImplCopyWithImpl( + _$ModelIdImpl _value, $Res Function(_$ModelIdImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ModelIdImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelIdImpl extends ModelId { + const _$ModelIdImpl(this.value, {final String? $type}) + : $type = $type ?? 'modelId', + super._(); + + factory _$ModelIdImpl.fromJson(Map json) => + _$$ModelIdImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'Model.modelId(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelIdImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => + __$$ModelIdImplCopyWithImpl<_$ModelIdImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Models value) model, + required TResult Function(String value) modelId, + }) { + return modelId(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Models value)? model, + TResult? Function(String value)? modelId, + }) { + return modelId?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Models value)? model, + TResult Function(String value)? 
modelId, + required TResult orElse(), + }) { + if (modelId != null) { + return modelId(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ModelCatalog value) model, + required TResult Function(ModelId value) modelId, + }) { + return modelId(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModelCatalog value)? model, + TResult? Function(ModelId value)? modelId, + }) { + return modelId?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModelCatalog value)? model, + TResult Function(ModelId value)? modelId, + required TResult orElse(), + }) { + if (modelId != null) { + return modelId(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModelIdImplToJson( + this, + ); + } +} + +abstract class ModelId extends Model { + const factory ModelId(final String value) = _$ModelIdImpl; + const ModelId._() : super._(); + + factory ModelId.fromJson(Map json) = _$ModelIdImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$ModelIdImplCopyWith<_$ModelIdImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateMessageRequestMetadata _$CreateMessageRequestMetadataFromJson( + Map json) { + return _CreateMessageRequestMetadata.fromJson(json); +} + +/// @nodoc +mixin _$CreateMessageRequestMetadata { + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) + String? get userId => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateMessageRequestMetadataCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateMessageRequestMetadataCopyWith<$Res> { + factory $CreateMessageRequestMetadataCopyWith( + CreateMessageRequestMetadata value, + $Res Function(CreateMessageRequestMetadata) then) = + _$CreateMessageRequestMetadataCopyWithImpl<$Res, + CreateMessageRequestMetadata>; + @useResult + $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? userId}); +} + +/// @nodoc +class _$CreateMessageRequestMetadataCopyWithImpl<$Res, + $Val extends CreateMessageRequestMetadata> + implements $CreateMessageRequestMetadataCopyWith<$Res> { + _$CreateMessageRequestMetadataCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? userId = freezed, + }) { + return _then(_value.copyWith( + userId: freezed == userId + ? _value.userId + : userId // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateMessageRequestMetadataImplCopyWith<$Res> + implements $CreateMessageRequestMetadataCopyWith<$Res> { + factory _$$CreateMessageRequestMetadataImplCopyWith( + _$CreateMessageRequestMetadataImpl value, + $Res Function(_$CreateMessageRequestMetadataImpl) then) = + __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'user_id', includeIfNull: false) String? 
userId}); +} + +/// @nodoc +class __$$CreateMessageRequestMetadataImplCopyWithImpl<$Res> + extends _$CreateMessageRequestMetadataCopyWithImpl<$Res, + _$CreateMessageRequestMetadataImpl> + implements _$$CreateMessageRequestMetadataImplCopyWith<$Res> { + __$$CreateMessageRequestMetadataImplCopyWithImpl( + _$CreateMessageRequestMetadataImpl _value, + $Res Function(_$CreateMessageRequestMetadataImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? userId = freezed, + }) { + return _then(_$CreateMessageRequestMetadataImpl( + userId: freezed == userId + ? _value.userId + : userId // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateMessageRequestMetadataImpl extends _CreateMessageRequestMetadata { + const _$CreateMessageRequestMetadataImpl( + {@JsonKey(name: 'user_id', includeIfNull: false) this.userId}) + : super._(); + + factory _$CreateMessageRequestMetadataImpl.fromJson( + Map json) => + _$$CreateMessageRequestMetadataImplFromJson(json); + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @override + @JsonKey(name: 'user_id', includeIfNull: false) + final String? userId; + + @override + String toString() { + return 'CreateMessageRequestMetadata(userId: $userId)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateMessageRequestMetadataImpl && + (identical(other.userId, userId) || other.userId == userId)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, userId); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateMessageRequestMetadataImplCopyWith< + _$CreateMessageRequestMetadataImpl> + get copyWith => __$$CreateMessageRequestMetadataImplCopyWithImpl< + _$CreateMessageRequestMetadataImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateMessageRequestMetadataImplToJson( + this, + ); + } +} + +abstract class _CreateMessageRequestMetadata + extends CreateMessageRequestMetadata { + const factory _CreateMessageRequestMetadata( + {@JsonKey(name: 'user_id', includeIfNull: false) + final String? userId}) = _$CreateMessageRequestMetadataImpl; + const _CreateMessageRequestMetadata._() : super._(); + + factory _CreateMessageRequestMetadata.fromJson(Map json) = + _$CreateMessageRequestMetadataImpl.fromJson; + + @override + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + @JsonKey(name: 'user_id', includeIfNull: false) + String? get userId; + @override + @JsonKey(ignore: true) + _$$CreateMessageRequestMetadataImplCopyWith< + _$CreateMessageRequestMetadataImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ToolChoice _$ToolChoiceFromJson(Map json) { + return _ToolChoice.fromJson(json); +} + +/// @nodoc +mixin _$ToolChoice { + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. 
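A small sketch of the three `tool_choice` modes documented here, built with the `ToolChoice` factory defined further down; the `ToolChoiceType` member names are assumptions derived from the documented `auto`/`any`/`tool` values:

```dart
// Sketch only: ToolChoiceType member names are assumed, verify against the enum.
const letClaudeDecide = ToolChoice(type: ToolChoiceType.auto);
const mustUseAnyTool = ToolChoice(type: ToolChoiceType.any);
const forceStockTool = ToolChoice(
  type: ToolChoiceType.tool,
  name: 'get_stock_price', // tool name from the get_stock_price example above
);
```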
+ /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + ToolChoiceType get type => throw _privateConstructorUsedError; + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolChoiceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolChoiceCopyWith<$Res> { + factory $ToolChoiceCopyWith( + ToolChoice value, $Res Function(ToolChoice) then) = + _$ToolChoiceCopyWithImpl<$Res, ToolChoice>; + @useResult + $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); +} + +/// @nodoc +class _$ToolChoiceCopyWithImpl<$Res, $Val extends ToolChoice> + implements $ToolChoiceCopyWith<$Res> { + _$ToolChoiceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? name = freezed, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolChoiceType, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolChoiceImplCopyWith<$Res> + implements $ToolChoiceCopyWith<$Res> { + factory _$$ToolChoiceImplCopyWith( + _$ToolChoiceImpl value, $Res Function(_$ToolChoiceImpl) then) = + __$$ToolChoiceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ToolChoiceType type, @JsonKey(includeIfNull: false) String? name}); +} + +/// @nodoc +class __$$ToolChoiceImplCopyWithImpl<$Res> + extends _$ToolChoiceCopyWithImpl<$Res, _$ToolChoiceImpl> + implements _$$ToolChoiceImplCopyWith<$Res> { + __$$ToolChoiceImplCopyWithImpl( + _$ToolChoiceImpl _value, $Res Function(_$ToolChoiceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? name = freezed, + }) { + return _then(_$ToolChoiceImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolChoiceType, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolChoiceImpl extends _ToolChoice { + const _$ToolChoiceImpl( + {required this.type, @JsonKey(includeIfNull: false) this.name}) + : super._(); + + factory _$ToolChoiceImpl.fromJson(Map json) => + _$$ToolChoiceImplFromJson(json); + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + @override + final ToolChoiceType type; + + /// The name of the tool to use. + @override + @JsonKey(includeIfNull: false) + final String? 
name; + + @override + String toString() { + return 'ToolChoice(type: $type, name: $name)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolChoiceImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.name, name) || other.name == name)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type, name); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => + __$$ToolChoiceImplCopyWithImpl<_$ToolChoiceImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolChoiceImplToJson( + this, + ); + } +} + +abstract class _ToolChoice extends ToolChoice { + const factory _ToolChoice( + {required final ToolChoiceType type, + @JsonKey(includeIfNull: false) final String? name}) = _$ToolChoiceImpl; + const _ToolChoice._() : super._(); + + factory _ToolChoice.fromJson(Map json) = + _$ToolChoiceImpl.fromJson; + + @override + + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. + /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + ToolChoiceType get type; + @override + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) + String? get name; + @override + @JsonKey(ignore: true) + _$$ToolChoiceImplCopyWith<_$ToolChoiceImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Message _$MessageFromJson(Map json) { + return _Message.fromJson(json); +} + +/// @nodoc +mixin _$Message { + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @JsonKey(includeIfNull: false) + String? get id => throw _privateConstructorUsedError; + + /// The content of the message. + @_MessageContentConverter() + MessageContent get content => throw _privateConstructorUsedError; + + /// The role of the messages author. + MessageRole get role => throw _privateConstructorUsedError; + + /// The model that handled the request. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason => throw _privateConstructorUsedError; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence => throw _privateConstructorUsedError; + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @JsonKey(includeIfNull: false) + String? 
get type => throw _privateConstructorUsedError; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @JsonKey(includeIfNull: false) + Usage? get usage => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageCopyWith<$Res> { + factory $MessageCopyWith(Message value, $Res Function(Message) then) = + _$MessageCopyWithImpl<$Res, Message>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? id, + @_MessageContentConverter() MessageContent content, + MessageRole role, + @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(includeIfNull: false) Usage? usage}); + + $MessageContentCopyWith<$Res> get content; + $UsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class _$MessageCopyWithImpl<$Res, $Val extends Message> + implements $MessageCopyWith<$Res> { + _$MessageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = freezed, + Object? content = null, + Object? role = null, + Object? model = freezed, + Object? stopReason = freezed, + Object? stopSequence = freezed, + Object? type = freezed, + Object? usage = freezed, + }) { + return _then(_value.copyWith( + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as MessageContent, + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as MessageRole, + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + usage: freezed == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as Usage?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $MessageContentCopyWith<$Res> get content { + return $MessageContentCopyWith<$Res>(_value.content, (value) { + return _then(_value.copyWith(content: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $UsageCopyWith<$Res>? 
get usage { + if (_value.usage == null) { + return null; + } + + return $UsageCopyWith<$Res>(_value.usage!, (value) { + return _then(_value.copyWith(usage: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { + factory _$$MessageImplCopyWith( + _$MessageImpl value, $Res Function(_$MessageImpl) then) = + __$$MessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? id, + @_MessageContentConverter() MessageContent content, + MessageRole role, + @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence, + @JsonKey(includeIfNull: false) String? type, + @JsonKey(includeIfNull: false) Usage? usage}); + + @override + $MessageContentCopyWith<$Res> get content; + @override + $UsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class __$$MessageImplCopyWithImpl<$Res> + extends _$MessageCopyWithImpl<$Res, _$MessageImpl> + implements _$$MessageImplCopyWith<$Res> { + __$$MessageImplCopyWithImpl( + _$MessageImpl _value, $Res Function(_$MessageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = freezed, + Object? content = null, + Object? role = null, + Object? model = freezed, + Object? stopReason = freezed, + Object? stopSequence = freezed, + Object? type = freezed, + Object? usage = freezed, + }) { + return _then(_$MessageImpl( + id: freezed == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String?, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as MessageContent, + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as MessageRole, + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + type: freezed == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String?, + usage: freezed == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as Usage?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageImpl extends _Message { + const _$MessageImpl( + {@JsonKey(includeIfNull: false) this.id, + @_MessageContentConverter() required this.content, + required this.role, + @JsonKey(includeIfNull: false) this.model, + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence, + @JsonKey(includeIfNull: false) this.type, + @JsonKey(includeIfNull: false) this.usage}) + : super._(); + + factory _$MessageImpl.fromJson(Map json) => + _$$MessageImplFromJson(json); + + /// Unique object identifier. + /// + /// The format and length of IDs may change over time. + @override + @JsonKey(includeIfNull: false) + final String? id; + + /// The content of the message. + @override + @_MessageContentConverter() + final MessageContent content; + + /// The role of the messages author. 
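A hedged sketch of reading back the `Message` fields documented in this class, using the `MessageContent.when` cases declared at the end of this file; `StopReason.maxTokens` and `Usage.outputTokens` are assumed member names for the `max_tokens` and `output_tokens` wire values:

```dart
// Sketch only: StopReason.maxTokens and usage.outputTokens are assumed names.
void inspectResponse(Message message) {
  final text = message.content.when(
    text: (value) => value,
    blocks: (blocks) => blocks.map((block) => block.toString()).join('\n'),
  );
  print('[${message.stopReason}] $text');
  if (message.stopReason == StopReason.maxTokens) {
    print('Truncated after ${message.usage?.outputTokens} output tokens.');
  }
}
```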
+ @override + final MessageRole role; + + /// The model that handled the request. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @override + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @override + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? stopSequence; + + /// Object type. + /// + /// For Messages, this is always `"message"`. + @override + @JsonKey(includeIfNull: false) + final String? type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @override + @JsonKey(includeIfNull: false) + final Usage? usage; + + @override + String toString() { + return 'Message(id: $id, content: $content, role: $role, model: $model, stopReason: $stopReason, stopSequence: $stopSequence, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.content, content) || other.content == content) && + (identical(other.role, role) || other.role == role) && + (identical(other.model, model) || other.model == model) && + (identical(other.stopReason, stopReason) || + other.stopReason == stopReason) && + (identical(other.stopSequence, stopSequence) || + other.stopSequence == stopSequence) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, id, content, role, model, + stopReason, stopSequence, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageImplCopyWith<_$MessageImpl> get copyWith => + __$$MessageImplCopyWithImpl<_$MessageImpl>(this, _$identity); + + @override + Map toJson() { + return _$$MessageImplToJson( + this, + ); + } +} + +abstract class _Message extends Message { + const factory _Message( + {@JsonKey(includeIfNull: false) final String? id, + @_MessageContentConverter() required final MessageContent content, + required final MessageRole role, + @JsonKey(includeIfNull: false) final String? 
model,
+      @JsonKey(
+          name: 'stop_reason',
+          includeIfNull: false,
+          unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+      final StopReason? stopReason,
+      @JsonKey(name: 'stop_sequence', includeIfNull: false)
+      final String? stopSequence,
+      @JsonKey(includeIfNull: false) final String? type,
+      @JsonKey(includeIfNull: false) final Usage? usage}) = _$MessageImpl;
+  const _Message._() : super._();
+
+  factory _Message.fromJson(Map<String, dynamic> json) = _$MessageImpl.fromJson;
+
+  @override
+
+  /// Unique object identifier.
+  ///
+  /// The format and length of IDs may change over time.
+  @JsonKey(includeIfNull: false)
+  String? get id;
+  @override
+
+  /// The content of the message.
+  @_MessageContentConverter()
+  MessageContent get content;
+  @override
+
+  /// The role of the message's author.
+  MessageRole get role;
+  @override
+
+  /// The model that handled the request.
+  @JsonKey(includeIfNull: false)
+  String? get model;
+  @override
+
+  /// The reason that we stopped.
+  ///
+  /// This may be one of the following values:
+  ///
+  /// - `"end_turn"`: the model reached a natural stopping point
+  /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+  /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+  ///
+  /// In non-streaming mode this value is always non-null. In streaming mode, it is
+  /// null in the `message_start` event and non-null otherwise.
+  @JsonKey(
+      name: 'stop_reason',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue)
+  StopReason? get stopReason;
+  @override
+
+  /// Which custom stop sequence was generated, if any.
+  ///
+  /// This value will be a non-null string if one of your custom stop sequences was
+  /// generated.
+  @JsonKey(name: 'stop_sequence', includeIfNull: false)
+  String? get stopSequence;
+  @override
+
+  /// Object type.
+  ///
+  /// For Messages, this is always `"message"`.
+  @JsonKey(includeIfNull: false)
+  String? get type;
+  @override
+
+  /// Billing and rate-limit usage.
+  ///
+  /// Anthropic's API bills and rate-limits by token counts, as tokens represent the
+  /// underlying cost to our systems.
+  ///
+  /// Under the hood, the API transforms requests into a format suitable for the
+  /// model. The model's output then goes through a parsing stage before becoming an
+  /// API response. As a result, the token counts in `usage` will not match one-to-one
+  /// with the exact visible content of an API request or response.
+  ///
+  /// For example, `output_tokens` will be non-zero, even for an empty string response
+  /// from Claude.
+  @JsonKey(includeIfNull: false)
+  Usage? get usage;
+  @override
+  @JsonKey(ignore: true)
+  _$$MessageImplCopyWith<_$MessageImpl> get copyWith =>
+      throw _privateConstructorUsedError;
+}
+
+MessageContent _$MessageContentFromJson(Map<String, dynamic> json) {
+  switch (json['runtimeType']) {
+    case 'blocks':
+      return MessageContentBlocks.fromJson(json);
+    case 'text':
+      return MessageContentText.fromJson(json);
+
+    default:
+      throw CheckedFromJsonException(json, 'runtimeType', 'MessageContent',
+          'Invalid union type "${json['runtimeType']}"!');
+  }
+}
+
+/// @nodoc
+mixin _$MessageContent {
+  Object get value => throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult when<TResult extends Object?>({
+    required TResult Function(List<Block> value) blocks,
+    required TResult Function(String value) text,
+  }) =>
+      throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult? whenOrNull<TResult extends Object?>({
+    TResult? Function(List<Block> value)? blocks,
+    TResult? Function(String value)?
text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageContentCopyWith<$Res> { + factory $MessageContentCopyWith( + MessageContent value, $Res Function(MessageContent) then) = + _$MessageContentCopyWithImpl<$Res, MessageContent>; +} + +/// @nodoc +class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> + implements $MessageContentCopyWith<$Res> { + _$MessageContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$MessageContentBlocksImplCopyWith<$Res> { + factory _$$MessageContentBlocksImplCopyWith(_$MessageContentBlocksImpl value, + $Res Function(_$MessageContentBlocksImpl) then) = + __$$MessageContentBlocksImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$MessageContentBlocksImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentBlocksImpl> + implements _$$MessageContentBlocksImplCopyWith<$Res> { + __$$MessageContentBlocksImplCopyWithImpl(_$MessageContentBlocksImpl _value, + $Res Function(_$MessageContentBlocksImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$MessageContentBlocksImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentBlocksImpl extends MessageContentBlocks { + const _$MessageContentBlocksImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 
'blocks', + super._(); + + factory _$MessageContentBlocksImpl.fromJson(Map json) => + _$$MessageContentBlocksImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'MessageContent.blocks(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentBlocksImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> + get copyWith => + __$$MessageContentBlocksImplCopyWithImpl<_$MessageContentBlocksImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return blocks(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return blocks?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, + }) { + return blocks(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentBlocks value)? blocks, + TResult? Function(MessageContentText value)? text, + }) { + return blocks?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? 
text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentBlocksImplToJson( + this, + ); + } +} + +abstract class MessageContentBlocks extends MessageContent { + const factory MessageContentBlocks(final List value) = + _$MessageContentBlocksImpl; + const MessageContentBlocks._() : super._(); + + factory MessageContentBlocks.fromJson(Map json) = + _$MessageContentBlocksImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$MessageContentBlocksImplCopyWith<_$MessageContentBlocksImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageContentTextImplCopyWith<$Res> { + factory _$$MessageContentTextImplCopyWith(_$MessageContentTextImpl value, + $Res Function(_$MessageContentTextImpl) then) = + __$$MessageContentTextImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$MessageContentTextImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextImpl> + implements _$$MessageContentTextImplCopyWith<$Res> { + __$$MessageContentTextImplCopyWithImpl(_$MessageContentTextImpl _value, + $Res Function(_$MessageContentTextImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$MessageContentTextImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageContentTextImpl extends MessageContentText { + const _$MessageContentTextImpl(this.value, {final String? $type}) + : $type = $type ?? 'text', + super._(); + + factory _$MessageContentTextImpl.fromJson(Map json) => + _$$MessageContentTextImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'MessageContent.text(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageContentTextImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => + __$$MessageContentTextImplCopyWithImpl<_$MessageContentTextImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return text(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return text?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageContentBlocks value) blocks, + required TResult Function(MessageContentText value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentBlocks value)? blocks, + TResult? 
Function(MessageContentText value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentBlocks value)? blocks, + TResult Function(MessageContentText value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageContentTextImplToJson( + this, + ); + } +} + +abstract class MessageContentText extends MessageContent { + const factory MessageContentText(final String value) = + _$MessageContentTextImpl; + const MessageContentText._() : super._(); + + factory MessageContentText.fromJson(Map json) = + _$MessageContentTextImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Tool _$ToolFromJson(Map json) { + return _Tool.fromJson(json); +} + +/// @nodoc +mixin _$Tool { + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + String get name => throw _privateConstructorUsedError; + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') + Map get inputSchema => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ToolCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolCopyWith<$Res> { + factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = + _$ToolCopyWithImpl<$Res, Tool>; + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(name: 'input_schema') Map inputSchema}); +} + +/// @nodoc +class _$ToolCopyWithImpl<$Res, $Val extends Tool> + implements $ToolCopyWith<$Res> { + _$ToolCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? inputSchema = null, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + inputSchema: null == inputSchema + ? _value.inputSchema + : inputSchema // ignore: cast_nullable_to_non_nullable + as Map, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { + factory _$$ToolImplCopyWith( + _$ToolImpl value, $Res Function(_$ToolImpl) then) = + __$$ToolImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? 
description, + @JsonKey(name: 'input_schema') Map inputSchema}); +} + +/// @nodoc +class __$$ToolImplCopyWithImpl<$Res> + extends _$ToolCopyWithImpl<$Res, _$ToolImpl> + implements _$$ToolImplCopyWith<$Res> { + __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? inputSchema = null, + }) { + return _then(_$ToolImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + inputSchema: null == inputSchema + ? _value._inputSchema + : inputSchema // ignore: cast_nullable_to_non_nullable + as Map, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolImpl extends _Tool { + const _$ToolImpl( + {required this.name, + @JsonKey(includeIfNull: false) this.description, + @JsonKey(name: 'input_schema') + required final Map inputSchema}) + : _inputSchema = inputSchema, + super._(); + + factory _$ToolImpl.fromJson(Map json) => + _$$ToolImplFromJson(json); + + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + @override + final String name; + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @override + @JsonKey(includeIfNull: false) + final String? description; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + final Map _inputSchema; + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @override + @JsonKey(name: 'input_schema') + Map get inputSchema { + if (_inputSchema is EqualUnmodifiableMapView) return _inputSchema; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_inputSchema); + } + + @override + String toString() { + return 'Tool(name: $name, description: $description, inputSchema: $inputSchema)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality() + .equals(other._inputSchema, _inputSchema)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_inputSchema)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ToolImplToJson( + this, + ); + } +} + +abstract class _Tool extends Tool { + const factory _Tool( + {required final String name, + @JsonKey(includeIfNull: false) final String? 
description, + @JsonKey(name: 'input_schema') + required final Map inputSchema}) = _$ToolImpl; + const _Tool._() : super._(); + + factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; + + @override + + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + String get name; + @override + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) + String? get description; + @override + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') + Map get inputSchema; + @override + @JsonKey(ignore: true) + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ImageBlockSource _$ImageBlockSourceFromJson(Map json) { + return _ImageBlockSource.fromJson(json); +} + +/// @nodoc +mixin _$ImageBlockSource { + /// The base64-encoded image data. + String get data => throw _privateConstructorUsedError; + + /// The media type of the image. + @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType => throw _privateConstructorUsedError; + + /// The type of image source. + ImageBlockSourceType get type => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ImageBlockSourceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ImageBlockSourceCopyWith<$Res> { + factory $ImageBlockSourceCopyWith( + ImageBlockSource value, $Res Function(ImageBlockSource) then) = + _$ImageBlockSourceCopyWithImpl<$Res, ImageBlockSource>; + @useResult + $Res call( + {String data, + @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, + ImageBlockSourceType type}); +} + +/// @nodoc +class _$ImageBlockSourceCopyWithImpl<$Res, $Val extends ImageBlockSource> + implements $ImageBlockSourceCopyWith<$Res> { + _$ImageBlockSourceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? mediaType = null, + Object? type = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as String, + mediaType: null == mediaType + ? _value.mediaType + : mediaType // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceMediaType, + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ImageBlockSourceImplCopyWith<$Res> + implements $ImageBlockSourceCopyWith<$Res> { + factory _$$ImageBlockSourceImplCopyWith(_$ImageBlockSourceImpl value, + $Res Function(_$ImageBlockSourceImpl) then) = + __$$ImageBlockSourceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String data, + @JsonKey(name: 'media_type') ImageBlockSourceMediaType mediaType, + ImageBlockSourceType type}); +} + +/// @nodoc +class __$$ImageBlockSourceImplCopyWithImpl<$Res> + extends _$ImageBlockSourceCopyWithImpl<$Res, _$ImageBlockSourceImpl> + implements _$$ImageBlockSourceImplCopyWith<$Res> { + __$$ImageBlockSourceImplCopyWithImpl(_$ImageBlockSourceImpl _value, + $Res Function(_$ImageBlockSourceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? mediaType = null, + Object? type = null, + }) { + return _then(_$ImageBlockSourceImpl( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as String, + mediaType: null == mediaType + ? _value.mediaType + : mediaType // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceMediaType, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ImageBlockSourceType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageBlockSourceImpl extends _ImageBlockSource { + const _$ImageBlockSourceImpl( + {required this.data, + @JsonKey(name: 'media_type') required this.mediaType, + required this.type}) + : super._(); + + factory _$ImageBlockSourceImpl.fromJson(Map json) => + _$$ImageBlockSourceImplFromJson(json); + + /// The base64-encoded image data. + @override + final String data; + + /// The media type of the image. + @override + @JsonKey(name: 'media_type') + final ImageBlockSourceMediaType mediaType; + + /// The type of image source. + @override + final ImageBlockSourceType type; + + @override + String toString() { + return 'ImageBlockSource(data: $data, mediaType: $mediaType, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageBlockSourceImpl && + (identical(other.data, data) || other.data == data) && + (identical(other.mediaType, mediaType) || + other.mediaType == mediaType) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, data, mediaType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => + __$$ImageBlockSourceImplCopyWithImpl<_$ImageBlockSourceImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ImageBlockSourceImplToJson( + this, + ); + } +} + +abstract class _ImageBlockSource extends ImageBlockSource { + const factory _ImageBlockSource( + {required final String data, + @JsonKey(name: 'media_type') + required final ImageBlockSourceMediaType mediaType, + required final ImageBlockSourceType type}) = _$ImageBlockSourceImpl; + const _ImageBlockSource._() : super._(); + + factory _ImageBlockSource.fromJson(Map json) = + _$ImageBlockSourceImpl.fromJson; + + @override + + /// The base64-encoded image data. + String get data; + @override + + /// The media type of the image. 
+ @JsonKey(name: 'media_type') + ImageBlockSourceMediaType get mediaType; + @override + + /// The type of image source. + ImageBlockSourceType get type; + @override + @JsonKey(ignore: true) + _$$ImageBlockSourceImplCopyWith<_$ImageBlockSourceImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Usage _$UsageFromJson(Map json) { + return _Usage.fromJson(json); +} + +/// @nodoc +mixin _$Usage { + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') + int get inputTokens => throw _privateConstructorUsedError; + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $UsageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $UsageCopyWith<$Res> { + factory $UsageCopyWith(Usage value, $Res Function(Usage) then) = + _$UsageCopyWithImpl<$Res, Usage>; + @useResult + $Res call( + {@JsonKey(name: 'input_tokens') int inputTokens, + @JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class _$UsageCopyWithImpl<$Res, $Val extends Usage> + implements $UsageCopyWith<$Res> { + _$UsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputTokens = null, + Object? outputTokens = null, + }) { + return _then(_value.copyWith( + inputTokens: null == inputTokens + ? _value.inputTokens + : inputTokens // ignore: cast_nullable_to_non_nullable + as int, + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$UsageImplCopyWith<$Res> implements $UsageCopyWith<$Res> { + factory _$$UsageImplCopyWith( + _$UsageImpl value, $Res Function(_$UsageImpl) then) = + __$$UsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'input_tokens') int inputTokens, + @JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class __$$UsageImplCopyWithImpl<$Res> + extends _$UsageCopyWithImpl<$Res, _$UsageImpl> + implements _$$UsageImplCopyWith<$Res> { + __$$UsageImplCopyWithImpl( + _$UsageImpl _value, $Res Function(_$UsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputTokens = null, + Object? outputTokens = null, + }) { + return _then(_$UsageImpl( + inputTokens: null == inputTokens + ? _value.inputTokens + : inputTokens // ignore: cast_nullable_to_non_nullable + as int, + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UsageImpl extends _Usage { + const _$UsageImpl( + {@JsonKey(name: 'input_tokens') required this.inputTokens, + @JsonKey(name: 'output_tokens') required this.outputTokens}) + : super._(); + + factory _$UsageImpl.fromJson(Map json) => + _$$UsageImplFromJson(json); + + /// The number of input tokens which were used. + @override + @JsonKey(name: 'input_tokens') + final int inputTokens; + + /// The number of output tokens which were used. 
+ @override + @JsonKey(name: 'output_tokens') + final int outputTokens; + + @override + String toString() { + return 'Usage(inputTokens: $inputTokens, outputTokens: $outputTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UsageImpl && + (identical(other.inputTokens, inputTokens) || + other.inputTokens == inputTokens) && + (identical(other.outputTokens, outputTokens) || + other.outputTokens == outputTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, inputTokens, outputTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UsageImplCopyWith<_$UsageImpl> get copyWith => + __$$UsageImplCopyWithImpl<_$UsageImpl>(this, _$identity); + + @override + Map toJson() { + return _$$UsageImplToJson( + this, + ); + } +} + +abstract class _Usage extends Usage { + const factory _Usage( + {@JsonKey(name: 'input_tokens') required final int inputTokens, + @JsonKey(name: 'output_tokens') required final int outputTokens}) = + _$UsageImpl; + const _Usage._() : super._(); + + factory _Usage.fromJson(Map json) = _$UsageImpl.fromJson; + + @override + + /// The number of input tokens which were used. + @JsonKey(name: 'input_tokens') + int get inputTokens; + @override + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens; + @override + @JsonKey(ignore: true) + _$$UsageImplCopyWith<_$UsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageDelta _$MessageDeltaFromJson(Map json) { + return _MessageDelta.fromJson(json); +} + +/// @nodoc +mixin _$MessageDelta { + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason => throw _privateConstructorUsedError; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaCopyWith<$Res> { + factory $MessageDeltaCopyWith( + MessageDelta value, $Res Function(MessageDelta) then) = + _$MessageDeltaCopyWithImpl<$Res, MessageDelta>; + @useResult + $Res call( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? 
stopSequence}); +} + +/// @nodoc +class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> + implements $MessageDeltaCopyWith<$Res> { + _$MessageDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? stopReason = freezed, + Object? stopSequence = freezed, + }) { + return _then(_value.copyWith( + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageDeltaImplCopyWith<$Res> + implements $MessageDeltaCopyWith<$Res> { + factory _$$MessageDeltaImplCopyWith( + _$MessageDeltaImpl value, $Res Function(_$MessageDeltaImpl) then) = + __$$MessageDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? stopSequence}); +} + +/// @nodoc +class __$$MessageDeltaImplCopyWithImpl<$Res> + extends _$MessageDeltaCopyWithImpl<$Res, _$MessageDeltaImpl> + implements _$$MessageDeltaImplCopyWith<$Res> { + __$$MessageDeltaImplCopyWithImpl( + _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? stopReason = freezed, + Object? stopSequence = freezed, + }) { + return _then(_$MessageDeltaImpl( + stopReason: freezed == stopReason + ? _value.stopReason + : stopReason // ignore: cast_nullable_to_non_nullable + as StopReason?, + stopSequence: freezed == stopSequence + ? _value.stopSequence + : stopSequence // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaImpl extends _MessageDelta { + const _$MessageDeltaImpl( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) this.stopSequence}) + : super._(); + + factory _$MessageDeltaImpl.fromJson(Map json) => + _$$MessageDeltaImplFromJson(json); + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @override + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason; + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @override + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? 
stopSequence; + + @override + String toString() { + return 'MessageDelta(stopReason: $stopReason, stopSequence: $stopSequence)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaImpl && + (identical(other.stopReason, stopReason) || + other.stopReason == stopReason) && + (identical(other.stopSequence, stopSequence) || + other.stopSequence == stopSequence)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, stopReason, stopSequence); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => + __$$MessageDeltaImplCopyWithImpl<_$MessageDeltaImpl>(this, _$identity); + + @override + Map toJson() { + return _$$MessageDeltaImplToJson( + this, + ); + } +} + +abstract class _MessageDelta extends MessageDelta { + const factory _MessageDelta( + {@JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final StopReason? stopReason, + @JsonKey(name: 'stop_sequence', includeIfNull: false) + final String? stopSequence}) = _$MessageDeltaImpl; + const _MessageDelta._() : super._(); + + factory _MessageDelta.fromJson(Map json) = + _$MessageDeltaImpl.fromJson; + + @override + + /// The reason that we stopped. + /// + /// This may be one the following values: + /// + /// - `"end_turn"`: the model reached a natural stopping point + /// - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + /// - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + /// + /// In non-streaming mode this value is always non-null. In streaming mode, it is + /// null in the `message_start` event and non-null otherwise. + @JsonKey( + name: 'stop_reason', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + StopReason? get stopReason; + @override + + /// Which custom stop sequence was generated, if any. + /// + /// This value will be a non-null string if one of your custom stop sequences was + /// generated. + @JsonKey(name: 'stop_sequence', includeIfNull: false) + String? get stopSequence; + @override + @JsonKey(ignore: true) + _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} + +MessageDeltaUsage _$MessageDeltaUsageFromJson(Map json) { + return _MessageDeltaUsage.fromJson(json); +} + +/// @nodoc +mixin _$MessageDeltaUsage { + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageDeltaUsageCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaUsageCopyWith<$Res> { + factory $MessageDeltaUsageCopyWith( + MessageDeltaUsage value, $Res Function(MessageDeltaUsage) then) = + _$MessageDeltaUsageCopyWithImpl<$Res, MessageDeltaUsage>; + @useResult + $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class _$MessageDeltaUsageCopyWithImpl<$Res, $Val extends MessageDeltaUsage> + implements $MessageDeltaUsageCopyWith<$Res> { + _$MessageDeltaUsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? 
outputTokens = null, + }) { + return _then(_value.copyWith( + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageDeltaUsageImplCopyWith<$Res> + implements $MessageDeltaUsageCopyWith<$Res> { + factory _$$MessageDeltaUsageImplCopyWith(_$MessageDeltaUsageImpl value, + $Res Function(_$MessageDeltaUsageImpl) then) = + __$$MessageDeltaUsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(name: 'output_tokens') int outputTokens}); +} + +/// @nodoc +class __$$MessageDeltaUsageImplCopyWithImpl<$Res> + extends _$MessageDeltaUsageCopyWithImpl<$Res, _$MessageDeltaUsageImpl> + implements _$$MessageDeltaUsageImplCopyWith<$Res> { + __$$MessageDeltaUsageImplCopyWithImpl(_$MessageDeltaUsageImpl _value, + $Res Function(_$MessageDeltaUsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? outputTokens = null, + }) { + return _then(_$MessageDeltaUsageImpl( + outputTokens: null == outputTokens + ? _value.outputTokens + : outputTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaUsageImpl extends _MessageDeltaUsage { + const _$MessageDeltaUsageImpl( + {@JsonKey(name: 'output_tokens') required this.outputTokens}) + : super._(); + + factory _$MessageDeltaUsageImpl.fromJson(Map json) => + _$$MessageDeltaUsageImplFromJson(json); + + /// The cumulative number of output tokens which were used. + @override + @JsonKey(name: 'output_tokens') + final int outputTokens; + + @override + String toString() { + return 'MessageDeltaUsage(outputTokens: $outputTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaUsageImpl && + (identical(other.outputTokens, outputTokens) || + other.outputTokens == outputTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, outputTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => + __$$MessageDeltaUsageImplCopyWithImpl<_$MessageDeltaUsageImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$MessageDeltaUsageImplToJson( + this, + ); + } +} + +abstract class _MessageDeltaUsage extends MessageDeltaUsage { + const factory _MessageDeltaUsage( + {@JsonKey(name: 'output_tokens') required final int outputTokens}) = + _$MessageDeltaUsageImpl; + const _MessageDeltaUsage._() : super._(); + + factory _MessageDeltaUsage.fromJson(Map json) = + _$MessageDeltaUsageImpl.fromJson; + + @override + + /// The cumulative number of output tokens which were used. + @JsonKey(name: 'output_tokens') + int get outputTokens; + @override + @JsonKey(ignore: true) + _$$MessageDeltaUsageImplCopyWith<_$MessageDeltaUsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Block _$BlockFromJson(Map json) { + switch (json['type']) { + case 'text': + return TextBlock.fromJson(json); + case 'image': + return ImageBlock.fromJson(json); + case 'tool_use': + return ToolUseBlock.fromJson(json); + case 'tool_result': + return ToolResultBlock.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'Block', 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$Block { + /// The type of content block. 
+  String get type => throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult when<TResult extends Object?>({
+    required TResult Function(String text, String type) text,
+    required TResult Function(ImageBlockSource source, String type) image,
+    required TResult Function(
+            String id, String name, Map<String, dynamic> input, String type)
+        toolUse,
+    required TResult Function(
+            @JsonKey(name: 'tool_use_id') String toolUseId,
+            @_ToolResultBlockContentConverter() ToolResultBlockContent content,
+            @JsonKey(name: 'is_error', includeIfNull: false) bool? isError,
+            String type)
+        toolResult,
+  }) =>
+      throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult? whenOrNull<TResult extends Object?>({
+    TResult? Function(String text, String type)? text,
+    TResult? Function(ImageBlockSource source, String type)? image,
+    TResult? Function(
+            String id, String name, Map<String, dynamic> input, String type)?
+        toolUse,
+    TResult? Function(
+            @JsonKey(name: 'tool_use_id') String toolUseId,
+            @_ToolResultBlockContentConverter() ToolResultBlockContent content,
+            @JsonKey(name: 'is_error', includeIfNull: false) bool? isError,
+            String type)?
+        toolResult,
+  }) =>
+      throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult maybeWhen<TResult extends Object?>({
+    TResult Function(String text, String type)? text,
+    TResult Function(ImageBlockSource source, String type)? image,
+    TResult Function(
+            String id, String name, Map<String, dynamic> input, String type)?
+        toolUse,
+    TResult Function(
+            @JsonKey(name: 'tool_use_id') String toolUseId,
+            @_ToolResultBlockContentConverter() ToolResultBlockContent content,
+            @JsonKey(name: 'is_error', includeIfNull: false) bool? isError,
+            String type)?
+        toolResult,
+    required TResult orElse(),
+  }) =>
+      throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult map<TResult extends Object?>({
+    required TResult Function(TextBlock value) text,
+    required TResult Function(ImageBlock value) image,
+    required TResult Function(ToolUseBlock value) toolUse,
+    required TResult Function(ToolResultBlock value) toolResult,
+  }) =>
+      throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult? mapOrNull<TResult extends Object?>({
+    TResult? Function(TextBlock value)? text,
+    TResult? Function(ImageBlock value)? image,
+    TResult? Function(ToolUseBlock value)? toolUse,
+    TResult? Function(ToolResultBlock value)? toolResult,
+  }) =>
+      throw _privateConstructorUsedError;
+  @optionalTypeArgs
+  TResult maybeMap<TResult extends Object?>({
+    TResult Function(TextBlock value)? text,
+    TResult Function(ImageBlock value)? image,
+    TResult Function(ToolUseBlock value)? toolUse,
+    TResult Function(ToolResultBlock value)? toolResult,
+    required TResult orElse(),
+  }) =>
+      throw _privateConstructorUsedError;
+  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
+  @JsonKey(ignore: true)
+  $BlockCopyWith<Block> get copyWith => throw _privateConstructorUsedError;
+}
+
+/// @nodoc
+abstract class $BlockCopyWith<$Res> {
+  factory $BlockCopyWith(Block value, $Res Function(Block) then) =
+      _$BlockCopyWithImpl<$Res, Block>;
+  @useResult
+  $Res call({String type});
+}
+
+/// @nodoc
+class _$BlockCopyWithImpl<$Res, $Val extends Block>
+    implements $BlockCopyWith<$Res> {
+  _$BlockCopyWithImpl(this._value, this._then);
+
+  // ignore: unused_field
+  final $Val _value;
+  // ignore: unused_field
+  final $Res Function($Val) _then;
+
+  @pragma('vm:prefer-inline')
+  @override
+  $Res call({
+    Object? type = null,
+  }) {
+    return _then(_value.copyWith(
+      type: null == type
+          ?
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$TextBlockImplCopyWith( + _$TextBlockImpl value, $Res Function(_$TextBlockImpl) then) = + __$$TextBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class __$$TextBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$TextBlockImpl> + implements _$$TextBlockImplCopyWith<$Res> { + __$$TextBlockImplCopyWithImpl( + _$TextBlockImpl _value, $Res Function(_$TextBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_$TextBlockImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$TextBlockImpl extends TextBlock { + const _$TextBlockImpl({required this.text, this.type = 'text'}) : super._(); + + factory _$TextBlockImpl.fromJson(Map json) => + _$$TextBlockImplFromJson(json); + + /// The text content. + @override + final String text; + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.text(text: $text, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$TextBlockImpl && + (identical(other.text, text) || other.text == text) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, text, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + __$$TextBlockImplCopyWithImpl<_$TextBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return text(this.text, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) { + return text?.call(this.text, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? 
+ toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) { + if (text != null) { + return text(this.text, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? toolResult, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$TextBlockImplToJson( + this, + ); + } +} + +abstract class TextBlock extends Block { + const factory TextBlock({required final String text, final String type}) = + _$TextBlockImpl; + const TextBlock._() : super._(); + + factory TextBlock.fromJson(Map json) = + _$TextBlockImpl.fromJson; + + /// The text content. + String get text; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$TextBlockImplCopyWith<_$TextBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ImageBlockImplCopyWith<$Res> implements $BlockCopyWith<$Res> { + factory _$$ImageBlockImplCopyWith( + _$ImageBlockImpl value, $Res Function(_$ImageBlockImpl) then) = + __$$ImageBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ImageBlockSource source, String type}); + + $ImageBlockSourceCopyWith<$Res> get source; +} + +/// @nodoc +class __$$ImageBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ImageBlockImpl> + implements _$$ImageBlockImplCopyWith<$Res> { + __$$ImageBlockImplCopyWithImpl( + _$ImageBlockImpl _value, $Res Function(_$ImageBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? source = null, + Object? type = null, + }) { + return _then(_$ImageBlockImpl( + source: null == source + ? _value.source + : source // ignore: cast_nullable_to_non_nullable + as ImageBlockSource, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } + + @override + @pragma('vm:prefer-inline') + $ImageBlockSourceCopyWith<$Res> get source { + return $ImageBlockSourceCopyWith<$Res>(_value.source, (value) { + return _then(_value.copyWith(source: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageBlockImpl extends ImageBlock { + const _$ImageBlockImpl({required this.source, this.type = 'image'}) + : super._(); + + factory _$ImageBlockImpl.fromJson(Map json) => + _$$ImageBlockImplFromJson(json); + + /// The source of an image block. + @override + final ImageBlockSource source; + + /// The type of content block. 
+ @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.image(source: $source, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageBlockImpl && + (identical(other.source, source) || other.source == source) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, source, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => + __$$ImageBlockImplCopyWithImpl<_$ImageBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return image(source, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) { + return image?.call(source, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) { + if (image != null) { + return image(source, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return image(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) { + return image?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, + required TResult orElse(), + }) { + if (image != null) { + return image(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ImageBlockImplToJson( + this, + ); + } +} + +abstract class ImageBlock extends Block { + const factory ImageBlock( + {required final ImageBlockSource source, + final String type}) = _$ImageBlockImpl; + const ImageBlock._() : super._(); + + factory ImageBlock.fromJson(Map json) = + _$ImageBlockImpl.fromJson; + + /// The source of an image block. + ImageBlockSource get source; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$ImageBlockImplCopyWith<_$ImageBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ToolUseBlockImplCopyWith<$Res> + implements $BlockCopyWith<$Res> { + factory _$$ToolUseBlockImplCopyWith( + _$ToolUseBlockImpl value, $Res Function(_$ToolUseBlockImpl) then) = + __$$ToolUseBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String id, String name, Map input, String type}); +} + +/// @nodoc +class __$$ToolUseBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ToolUseBlockImpl> + implements _$$ToolUseBlockImplCopyWith<$Res> { + __$$ToolUseBlockImplCopyWithImpl( + _$ToolUseBlockImpl _value, $Res Function(_$ToolUseBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? name = null, + Object? input = null, + Object? type = null, + }) { + return _then(_$ToolUseBlockImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + input: null == input + ? _value._input + : input // ignore: cast_nullable_to_non_nullable + as Map, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolUseBlockImpl extends ToolUseBlock { + const _$ToolUseBlockImpl( + {required this.id, + required this.name, + required final Map input, + this.type = 'tool_use'}) + : _input = input, + super._(); + + factory _$ToolUseBlockImpl.fromJson(Map json) => + _$$ToolUseBlockImplFromJson(json); + + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + @override + final String id; + + /// The name of the tool being used. + @override + final String name; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + final Map _input; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + @override + Map get input { + if (_input is EqualUnmodifiableMapView) return _input; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_input); + } + + /// The type of content block. 
+ @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.toolUse(id: $id, name: $name, input: $input, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolUseBlockImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality().equals(other._input, _input) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, id, name, const DeepCollectionEquality().hash(_input), type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => + __$$ToolUseBlockImplCopyWithImpl<_$ToolUseBlockImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return toolUse(id, name, input, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) { + return toolUse?.call(id, name, input, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + required TResult orElse(), + }) { + if (toolUse != null) { + return toolUse(id, name, input, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return toolUse(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) { + return toolUse?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? 
toolResult, + required TResult orElse(), + }) { + if (toolUse != null) { + return toolUse(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolUseBlockImplToJson( + this, + ); + } +} + +abstract class ToolUseBlock extends Block { + const factory ToolUseBlock( + {required final String id, + required final String name, + required final Map input, + final String type}) = _$ToolUseBlockImpl; + const ToolUseBlock._() : super._(); + + factory ToolUseBlock.fromJson(Map json) = + _$ToolUseBlockImpl.fromJson; + + /// A unique identifier for this particular tool use block. + /// This will be used to match up the tool results later. + String get id; + + /// The name of the tool being used. + String get name; + + /// An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + Map get input; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$ToolUseBlockImplCopyWith<_$ToolUseBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ToolResultBlockImplCopyWith<$Res> + implements $BlockCopyWith<$Res> { + factory _$$ToolResultBlockImplCopyWith(_$ToolResultBlockImpl value, + $Res Function(_$ToolResultBlockImpl) then) = + __$$ToolResultBlockImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type}); + + $ToolResultBlockContentCopyWith<$Res> get content; +} + +/// @nodoc +class __$$ToolResultBlockImplCopyWithImpl<$Res> + extends _$BlockCopyWithImpl<$Res, _$ToolResultBlockImpl> + implements _$$ToolResultBlockImplCopyWith<$Res> { + __$$ToolResultBlockImplCopyWithImpl( + _$ToolResultBlockImpl _value, $Res Function(_$ToolResultBlockImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? toolUseId = null, + Object? content = null, + Object? isError = freezed, + Object? type = null, + }) { + return _then(_$ToolResultBlockImpl( + toolUseId: null == toolUseId + ? _value.toolUseId + : toolUseId // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as ToolResultBlockContent, + isError: freezed == isError + ? _value.isError + : isError // ignore: cast_nullable_to_non_nullable + as bool?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } + + @override + @pragma('vm:prefer-inline') + $ToolResultBlockContentCopyWith<$Res> get content { + return $ToolResultBlockContentCopyWith<$Res>(_value.content, (value) { + return _then(_value.copyWith(content: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockImpl extends ToolResultBlock { + const _$ToolResultBlockImpl( + {@JsonKey(name: 'tool_use_id') required this.toolUseId, + @_ToolResultBlockContentConverter() required this.content, + @JsonKey(name: 'is_error', includeIfNull: false) this.isError, + this.type = 'tool_result'}) + : super._(); + + factory _$ToolResultBlockImpl.fromJson(Map json) => + _$$ToolResultBlockImplFromJson(json); + + /// The `id` of the tool use request this is a result for. + @override + @JsonKey(name: 'tool_use_id') + final String toolUseId; + + /// The result of the tool, as a string (e.g. 
`"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @override + @_ToolResultBlockContentConverter() + final ToolResultBlockContent content; + + /// Set to `true` if the tool execution resulted in an error. + @override + @JsonKey(name: 'is_error', includeIfNull: false) + final bool? isError; + + /// The type of content block. + @override + @JsonKey() + final String type; + + @override + String toString() { + return 'Block.toolResult(toolUseId: $toolUseId, content: $content, isError: $isError, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockImpl && + (identical(other.toolUseId, toolUseId) || + other.toolUseId == toolUseId) && + (identical(other.content, content) || other.content == content) && + (identical(other.isError, isError) || other.isError == isError) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, toolUseId, content, isError, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => + __$$ToolResultBlockImplCopyWithImpl<_$ToolResultBlockImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) text, + required TResult Function(ImageBlockSource source, String type) image, + required TResult Function( + String id, String name, Map input, String type) + toolUse, + required TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type) + toolResult, + }) { + return toolResult(toolUseId, content, isError, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? text, + TResult? Function(ImageBlockSource source, String type)? image, + TResult? Function( + String id, String name, Map input, String type)? + toolUse, + TResult? Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? + toolResult, + }) { + return toolResult?.call(toolUseId, content, isError, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? text, + TResult Function(ImageBlockSource source, String type)? image, + TResult Function( + String id, String name, Map input, String type)? + toolUse, + TResult Function( + @JsonKey(name: 'tool_use_id') String toolUseId, + @_ToolResultBlockContentConverter() ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) bool? isError, + String type)? 
+ toolResult, + required TResult orElse(), + }) { + if (toolResult != null) { + return toolResult(toolUseId, content, isError, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlock value) text, + required TResult Function(ImageBlock value) image, + required TResult Function(ToolUseBlock value) toolUse, + required TResult Function(ToolResultBlock value) toolResult, + }) { + return toolResult(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlock value)? text, + TResult? Function(ImageBlock value)? image, + TResult? Function(ToolUseBlock value)? toolUse, + TResult? Function(ToolResultBlock value)? toolResult, + }) { + return toolResult?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlock value)? text, + TResult Function(ImageBlock value)? image, + TResult Function(ToolUseBlock value)? toolUse, + TResult Function(ToolResultBlock value)? toolResult, + required TResult orElse(), + }) { + if (toolResult != null) { + return toolResult(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockImplToJson( + this, + ); + } +} + +abstract class ToolResultBlock extends Block { + const factory ToolResultBlock( + {@JsonKey(name: 'tool_use_id') required final String toolUseId, + @_ToolResultBlockContentConverter() + required final ToolResultBlockContent content, + @JsonKey(name: 'is_error', includeIfNull: false) final bool? isError, + final String type}) = _$ToolResultBlockImpl; + const ToolResultBlock._() : super._(); + + factory ToolResultBlock.fromJson(Map json) = + _$ToolResultBlockImpl.fromJson; + + /// The `id` of the tool use request this is a result for. + @JsonKey(name: 'tool_use_id') + String get toolUseId; + + /// The result of the tool, as a string (e.g. `"content": "15 degrees"`) + /// or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + /// These content blocks can use the text or image types. + @_ToolResultBlockContentConverter() + ToolResultBlockContent get content; + + /// Set to `true` if the tool execution resulted in an error. + @JsonKey(name: 'is_error', includeIfNull: false) + bool? get isError; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$ToolResultBlockImplCopyWith<_$ToolResultBlockImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ToolResultBlockContent _$ToolResultBlockContentFromJson( + Map json) { + switch (json['runtimeType']) { + case 'blocks': + return ToolResultBlockContentBlocks.fromJson(json); + case 'text': + return ToolResultBlockContentText.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'runtimeType', + 'ToolResultBlockContent', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ToolResultBlockContent { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? 
text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? text, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ToolResultBlockContentCopyWith<$Res> { + factory $ToolResultBlockContentCopyWith(ToolResultBlockContent value, + $Res Function(ToolResultBlockContent) then) = + _$ToolResultBlockContentCopyWithImpl<$Res, ToolResultBlockContent>; +} + +/// @nodoc +class _$ToolResultBlockContentCopyWithImpl<$Res, + $Val extends ToolResultBlockContent> + implements $ToolResultBlockContentCopyWith<$Res> { + _$ToolResultBlockContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { + factory _$$ToolResultBlockContentBlocksImplCopyWith( + _$ToolResultBlockContentBlocksImpl value, + $Res Function(_$ToolResultBlockContentBlocksImpl) then) = + __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$ToolResultBlockContentBlocksImplCopyWithImpl<$Res> + extends _$ToolResultBlockContentCopyWithImpl<$Res, + _$ToolResultBlockContentBlocksImpl> + implements _$$ToolResultBlockContentBlocksImplCopyWith<$Res> { + __$$ToolResultBlockContentBlocksImplCopyWithImpl( + _$ToolResultBlockContentBlocksImpl _value, + $Res Function(_$ToolResultBlockContentBlocksImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ToolResultBlockContentBlocksImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockContentBlocksImpl extends ToolResultBlockContentBlocks { + const _$ToolResultBlockContentBlocksImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 
'blocks', + super._(); + + factory _$ToolResultBlockContentBlocksImpl.fromJson( + Map json) => + _$$ToolResultBlockContentBlocksImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ToolResultBlockContent.blocks(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockContentBlocksImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockContentBlocksImplCopyWith< + _$ToolResultBlockContentBlocksImpl> + get copyWith => __$$ToolResultBlockContentBlocksImplCopyWithImpl< + _$ToolResultBlockContentBlocksImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return blocks(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return blocks?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, + }) { + return blocks(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, + }) { + return blocks?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? 
text, + required TResult orElse(), + }) { + if (blocks != null) { + return blocks(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockContentBlocksImplToJson( + this, + ); + } +} + +abstract class ToolResultBlockContentBlocks extends ToolResultBlockContent { + const factory ToolResultBlockContentBlocks(final List value) = + _$ToolResultBlockContentBlocksImpl; + const ToolResultBlockContentBlocks._() : super._(); + + factory ToolResultBlockContentBlocks.fromJson(Map json) = + _$ToolResultBlockContentBlocksImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$ToolResultBlockContentBlocksImplCopyWith< + _$ToolResultBlockContentBlocksImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ToolResultBlockContentTextImplCopyWith<$Res> { + factory _$$ToolResultBlockContentTextImplCopyWith( + _$ToolResultBlockContentTextImpl value, + $Res Function(_$ToolResultBlockContentTextImpl) then) = + __$$ToolResultBlockContentTextImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ToolResultBlockContentTextImplCopyWithImpl<$Res> + extends _$ToolResultBlockContentCopyWithImpl<$Res, + _$ToolResultBlockContentTextImpl> + implements _$$ToolResultBlockContentTextImplCopyWith<$Res> { + __$$ToolResultBlockContentTextImplCopyWithImpl( + _$ToolResultBlockContentTextImpl _value, + $Res Function(_$ToolResultBlockContentTextImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ToolResultBlockContentTextImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ToolResultBlockContentTextImpl extends ToolResultBlockContentText { + const _$ToolResultBlockContentTextImpl(this.value, {final String? $type}) + : $type = $type ?? 'text', + super._(); + + factory _$ToolResultBlockContentTextImpl.fromJson( + Map json) => + _$$ToolResultBlockContentTextImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ToolResultBlockContent.text(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ToolResultBlockContentTextImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> + get copyWith => __$$ToolResultBlockContentTextImplCopyWithImpl< + _$ToolResultBlockContentTextImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) blocks, + required TResult Function(String value) text, + }) { + return text(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? blocks, + TResult? Function(String value)? text, + }) { + return text?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? blocks, + TResult Function(String value)? 
text, + required TResult orElse(), + }) { + if (text != null) { + return text(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ToolResultBlockContentBlocks value) blocks, + required TResult Function(ToolResultBlockContentText value) text, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ToolResultBlockContentBlocks value)? blocks, + TResult? Function(ToolResultBlockContentText value)? text, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ToolResultBlockContentBlocks value)? blocks, + TResult Function(ToolResultBlockContentText value)? text, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ToolResultBlockContentTextImplToJson( + this, + ); + } +} + +abstract class ToolResultBlockContentText extends ToolResultBlockContent { + const factory ToolResultBlockContentText(final String value) = + _$ToolResultBlockContentTextImpl; + const ToolResultBlockContentText._() : super._(); + + factory ToolResultBlockContentText.fromJson(Map json) = + _$ToolResultBlockContentTextImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$ToolResultBlockContentTextImplCopyWith<_$ToolResultBlockContentTextImpl> + get copyWith => throw _privateConstructorUsedError; +} + +MessageStreamEvent _$MessageStreamEventFromJson(Map json) { + switch (json['type']) { + case 'message_start': + return MessageStartEvent.fromJson(json); + case 'message_delta': + return MessageDeltaEvent.fromJson(json); + case 'message_stop': + return MessageStopEvent.fromJson(json); + case 'content_block_start': + return ContentBlockStartEvent.fromJson(json); + case 'content_block_delta': + return ContentBlockDeltaEvent.fromJson(json); + case 'content_block_stop': + return ContentBlockStopEvent.fromJson(json); + case 'ping': + return PingEvent.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageStreamEvent', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageStreamEvent { + /// The type of a streaming event. + MessageStreamEventType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? 
Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $MessageStreamEventCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageStreamEventCopyWith<$Res> { + factory $MessageStreamEventCopyWith( + MessageStreamEvent value, $Res Function(MessageStreamEvent) then) = + _$MessageStreamEventCopyWithImpl<$Res, MessageStreamEvent>; + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class _$MessageStreamEventCopyWithImpl<$Res, $Val extends MessageStreamEvent> + implements $MessageStreamEventCopyWith<$Res> { + _$MessageStreamEventCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStartEventImplCopyWith(_$MessageStartEventImpl value, + $Res Function(_$MessageStartEventImpl) then) = + __$$MessageStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({Message message, MessageStreamEventType type}); + + $MessageCopyWith<$Res> get message; +} + +/// @nodoc +class __$$MessageStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStartEventImpl> + implements _$$MessageStartEventImplCopyWith<$Res> { + __$$MessageStartEventImplCopyWithImpl(_$MessageStartEventImpl _value, + $Res Function(_$MessageStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? message = null, + Object? type = null, + }) { + return _then(_$MessageStartEventImpl( + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as Message, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { + return _then(_value.copyWith(message: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStartEventImpl extends MessageStartEvent { + const _$MessageStartEventImpl({required this.message, required this.type}) + : super._(); + + factory _$MessageStartEventImpl.fromJson(Map json) => + _$$MessageStartEventImplFromJson(json); + + /// A message in a chat conversation. + @override + final Message message; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStart(message: $message, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStartEventImpl && + (identical(other.message, message) || other.message == message) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, message, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + __$$MessageStartEventImplCopyWithImpl<_$MessageStartEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStart(message, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageStart?.call(message, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(message, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? 
ping, + required TResult orElse(), + }) { + if (messageStart != null) { + return messageStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStartEventImplToJson( + this, + ); + } +} + +abstract class MessageStartEvent extends MessageStreamEvent { + const factory MessageStartEvent( + {required final Message message, + required final MessageStreamEventType type}) = _$MessageStartEventImpl; + const MessageStartEvent._() : super._(); + + factory MessageStartEvent.fromJson(Map json) = + _$MessageStartEventImpl.fromJson; + + /// A message in a chat conversation. + Message get message; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStartEventImplCopyWith<_$MessageStartEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageDeltaEventImplCopyWith(_$MessageDeltaEventImpl value, + $Res Function(_$MessageDeltaEventImpl) then) = + __$$MessageDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {MessageDelta delta, + MessageStreamEventType type, + MessageDeltaUsage usage}); + + $MessageDeltaCopyWith<$Res> get delta; + $MessageDeltaUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class __$$MessageDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageDeltaEventImpl> + implements _$$MessageDeltaEventImplCopyWith<$Res> { + __$$MessageDeltaEventImplCopyWithImpl(_$MessageDeltaEventImpl _value, + $Res Function(_$MessageDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? type = null, + Object? usage = null, + }) { + return _then(_$MessageDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as MessageDelta, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + usage: null == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as MessageDeltaUsage, + )); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaCopyWith<$Res> get delta { + return $MessageDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } + + @override + @pragma('vm:prefer-inline') + $MessageDeltaUsageCopyWith<$Res> get usage { + return $MessageDeltaUsageCopyWith<$Res>(_value.usage, (value) { + return _then(_value.copyWith(usage: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageDeltaEventImpl extends MessageDeltaEvent { + const _$MessageDeltaEventImpl( + {required this.delta, required this.type, required this.usage}) + : super._(); + + factory _$MessageDeltaEventImpl.fromJson(Map json) => + _$$MessageDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + @override + final MessageDelta delta; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. 
As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. + @override + final MessageDeltaUsage usage; + + @override + String toString() { + return 'MessageStreamEvent.messageDelta(delta: $delta, type: $type, usage: $usage)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.type, type) || other.type == type) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, type, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + __$$MessageDeltaEventImplCopyWithImpl<_$MessageDeltaEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageDelta(delta, type, usage); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return messageDelta?.call(delta, type, usage); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (messageDelta != null) { + return messageDelta(delta, type, usage); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageDelta != null) { + return messageDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageDeltaEventImplToJson( + this, + ); + } +} + +abstract class MessageDeltaEvent extends MessageStreamEvent { + const factory MessageDeltaEvent( + {required final MessageDelta delta, + required final MessageStreamEventType type, + required final MessageDeltaUsage usage}) = _$MessageDeltaEventImpl; + const MessageDeltaEvent._() : super._(); + + factory MessageDeltaEvent.fromJson(Map json) = + _$MessageDeltaEventImpl.fromJson; + + /// A delta in a streaming message. + MessageDelta get delta; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + + /// Billing and rate-limit usage. + /// + /// Anthropic's API bills and rate-limits by token counts, as tokens represent the + /// underlying cost to our systems. + /// + /// Under the hood, the API transforms requests into a format suitable for the + /// model. The model's output then goes through a parsing stage before becoming an + /// API response. As a result, the token counts in `usage` will not match one-to-one + /// with the exact visible content of an API request or response. + /// + /// For example, `output_tokens` will be non-zero, even for an empty string response + /// from Claude. 
+ MessageDeltaUsage get usage; + @override + @JsonKey(ignore: true) + _$$MessageDeltaEventImplCopyWith<_$MessageDeltaEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$MessageStopEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$MessageStopEventImplCopyWith(_$MessageStopEventImpl value, + $Res Function(_$MessageStopEventImpl) then) = + __$$MessageStopEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class __$$MessageStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$MessageStopEventImpl> + implements _$$MessageStopEventImplCopyWith<$Res> { + __$$MessageStopEventImplCopyWithImpl(_$MessageStopEventImpl _value, + $Res Function(_$MessageStopEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$MessageStopEventImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$MessageStopEventImpl extends MessageStopEvent { + const _$MessageStopEventImpl({required this.type}) : super._(); + + factory _$MessageStopEventImpl.fromJson(Map json) => + _$$MessageStopEventImplFromJson(json); + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.messageStop(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$MessageStopEventImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + __$$MessageStopEventImplCopyWithImpl<_$MessageStopEventImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return messageStop(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? 
Function(MessageStreamEventType type)? ping, + }) { + return messageStop?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (messageStop != null) { + return messageStop(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return messageStop(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return messageStop?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (messageStop != null) { + return messageStop(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$MessageStopEventImplToJson( + this, + ); + } +} + +abstract class MessageStopEvent extends MessageStreamEvent { + const factory MessageStopEvent({required final MessageStreamEventType type}) = + _$MessageStopEventImpl; + const MessageStopEvent._() : super._(); + + factory MessageStopEvent.fromJson(Map json) = + _$MessageStopEventImpl.fromJson; + + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$MessageStopEventImplCopyWith<_$MessageStopEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockStartEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockStartEventImplCopyWith( + _$ContentBlockStartEventImpl value, + $Res Function(_$ContentBlockStartEventImpl) then) = + __$$ContentBlockStartEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type}); + + $BlockCopyWith<$Res> get contentBlock; +} + +/// @nodoc +class __$$ContentBlockStartEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStartEventImpl> + implements _$$ContentBlockStartEventImplCopyWith<$Res> { + __$$ContentBlockStartEventImplCopyWithImpl( + _$ContentBlockStartEventImpl _value, + $Res Function(_$ContentBlockStartEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? contentBlock = null, + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockStartEventImpl( + contentBlock: null == contentBlock + ? _value.contentBlock + : contentBlock // ignore: cast_nullable_to_non_nullable + as Block, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $BlockCopyWith<$Res> get contentBlock { + return $BlockCopyWith<$Res>(_value.contentBlock, (value) { + return _then(_value.copyWith(contentBlock: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockStartEventImpl extends ContentBlockStartEvent { + const _$ContentBlockStartEventImpl( + {@JsonKey(name: 'content_block') required this.contentBlock, + required this.index, + required this.type}) + : super._(); + + factory _$ContentBlockStartEventImpl.fromJson(Map json) => + _$$ContentBlockStartEventImplFromJson(json); + + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @override + @JsonKey(name: 'content_block') + final Block contentBlock; + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockStart(contentBlock: $contentBlock, index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockStartEventImpl && + (identical(other.contentBlock, contentBlock) || + other.contentBlock == contentBlock) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, contentBlock, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => __$$ContentBlockStartEventImplCopyWithImpl< + _$ContentBlockStartEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockStart(contentBlock, index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockStart?.call(contentBlock, index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (contentBlockStart != null) { + return contentBlockStart(contentBlock, index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockStart(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockStart?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (contentBlockStart != null) { + return contentBlockStart(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockStartEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockStartEvent extends MessageStreamEvent { + const factory ContentBlockStartEvent( + {@JsonKey(name: 'content_block') required final Block contentBlock, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStartEventImpl; + const ContentBlockStartEvent._() : super._(); + + factory ContentBlockStartEvent.fromJson(Map json) = + _$ContentBlockStartEventImpl.fromJson; + + /// A block of content in a message. + /// Any of: [TextBlock], [ImageBlock], [ToolUseBlock], [ToolResultBlock] + @JsonKey(name: 'content_block') + Block get contentBlock; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockStartEventImplCopyWith<_$ContentBlockStartEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockDeltaEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockDeltaEventImplCopyWith( + _$ContentBlockDeltaEventImpl value, + $Res Function(_$ContentBlockDeltaEventImpl) then) = + __$$ContentBlockDeltaEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({BlockDelta delta, int index, MessageStreamEventType type}); + + $BlockDeltaCopyWith<$Res> get delta; +} + +/// @nodoc +class __$$ContentBlockDeltaEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockDeltaEventImpl> + implements _$$ContentBlockDeltaEventImplCopyWith<$Res> { + __$$ContentBlockDeltaEventImplCopyWithImpl( + _$ContentBlockDeltaEventImpl _value, + $Res Function(_$ContentBlockDeltaEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockDeltaEventImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as BlockDelta, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } + + @override + @pragma('vm:prefer-inline') + $BlockDeltaCopyWith<$Res> get delta { + return $BlockDeltaCopyWith<$Res>(_value.delta, (value) { + return _then(_value.copyWith(delta: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockDeltaEventImpl extends ContentBlockDeltaEvent { + const _$ContentBlockDeltaEventImpl( + {required this.delta, required this.index, required this.type}) + : super._(); + + factory _$ContentBlockDeltaEventImpl.fromJson(Map json) => + _$$ContentBlockDeltaEventImplFromJson(json); + + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + @override + final BlockDelta delta; + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. 
+ @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockDelta(delta: $delta, index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockDeltaEventImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => __$$ContentBlockDeltaEventImplCopyWithImpl< + _$ContentBlockDeltaEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockDelta(delta, index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockDelta?.call(delta, index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? 
ping, + required TResult orElse(), + }) { + if (contentBlockDelta != null) { + return contentBlockDelta(delta, index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (contentBlockDelta != null) { + return contentBlockDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockDeltaEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockDeltaEvent extends MessageStreamEvent { + const factory ContentBlockDeltaEvent( + {required final BlockDelta delta, + required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockDeltaEventImpl; + const ContentBlockDeltaEvent._() : super._(); + + factory ContentBlockDeltaEvent.fromJson(Map json) = + _$ContentBlockDeltaEventImpl.fromJson; + + /// A delta in a streaming message. + /// Any of: [TextBlockDelta], [InputJsonBlockDelta] + BlockDelta get delta; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockDeltaEventImplCopyWith<_$ContentBlockDeltaEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ContentBlockStopEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$ContentBlockStopEventImplCopyWith( + _$ContentBlockStopEventImpl value, + $Res Function(_$ContentBlockStopEventImpl) then) = + __$$ContentBlockStopEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({int index, MessageStreamEventType type}); +} + +/// @nodoc +class __$$ContentBlockStopEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$ContentBlockStopEventImpl> + implements _$$ContentBlockStopEventImplCopyWith<$Res> { + __$$ContentBlockStopEventImplCopyWithImpl(_$ContentBlockStopEventImpl _value, + $Res Function(_$ContentBlockStopEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + }) { + return _then(_$ContentBlockStopEventImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ContentBlockStopEventImpl extends ContentBlockStopEvent { + const _$ContentBlockStopEventImpl({required this.index, required this.type}) + : super._(); + + factory _$ContentBlockStopEventImpl.fromJson(Map json) => + _$$ContentBlockStopEventImplFromJson(json); + + /// The index of the content block. + @override + final int index; + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.contentBlockStop(index: $index, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ContentBlockStopEventImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, index, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => __$$ContentBlockStopEventImplCopyWithImpl< + _$ContentBlockStopEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return contentBlockStop(index, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? 
Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return contentBlockStop?.call(index, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (contentBlockStop != null) { + return contentBlockStop(index, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return contentBlockStop(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return contentBlockStop?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? 
ping, + required TResult orElse(), + }) { + if (contentBlockStop != null) { + return contentBlockStop(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ContentBlockStopEventImplToJson( + this, + ); + } +} + +abstract class ContentBlockStopEvent extends MessageStreamEvent { + const factory ContentBlockStopEvent( + {required final int index, + required final MessageStreamEventType type}) = + _$ContentBlockStopEventImpl; + const ContentBlockStopEvent._() : super._(); + + factory ContentBlockStopEvent.fromJson(Map json) = + _$ContentBlockStopEventImpl.fromJson; + + /// The index of the content block. + int get index; + @override + + /// The type of a streaming event. + MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$ContentBlockStopEventImplCopyWith<_$ContentBlockStopEventImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$PingEventImplCopyWith<$Res> + implements $MessageStreamEventCopyWith<$Res> { + factory _$$PingEventImplCopyWith( + _$PingEventImpl value, $Res Function(_$PingEventImpl) then) = + __$$PingEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({MessageStreamEventType type}); +} + +/// @nodoc +class __$$PingEventImplCopyWithImpl<$Res> + extends _$MessageStreamEventCopyWithImpl<$Res, _$PingEventImpl> + implements _$$PingEventImplCopyWith<$Res> { + __$$PingEventImplCopyWithImpl( + _$PingEventImpl _value, $Res Function(_$PingEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_$PingEventImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as MessageStreamEventType, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$PingEventImpl extends PingEvent { + const _$PingEventImpl({required this.type}) : super._(); + + factory _$PingEventImpl.fromJson(Map json) => + _$$PingEventImplFromJson(json); + + /// The type of a streaming event. + @override + final MessageStreamEventType type; + + @override + String toString() { + return 'MessageStreamEvent.ping(type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$PingEventImpl && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + __$$PingEventImplCopyWithImpl<_$PingEventImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(Message message, MessageStreamEventType type) + messageStart, + required TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage) + messageDelta, + required TResult Function(MessageStreamEventType type) messageStop, + required TResult Function( + @JsonKey(name: 'content_block') Block contentBlock, + int index, + MessageStreamEventType type) + contentBlockStart, + required TResult Function( + BlockDelta delta, int index, MessageStreamEventType type) + contentBlockDelta, + required TResult Function(int index, MessageStreamEventType type) + contentBlockStop, + required TResult Function(MessageStreamEventType type) ping, + }) { + return ping(type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? 
Function(Message message, MessageStreamEventType type)? + messageStart, + TResult? Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult? Function(MessageStreamEventType type)? messageStop, + TResult? Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult? Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult? Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult? Function(MessageStreamEventType type)? ping, + }) { + return ping?.call(type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(Message message, MessageStreamEventType type)? + messageStart, + TResult Function(MessageDelta delta, MessageStreamEventType type, + MessageDeltaUsage usage)? + messageDelta, + TResult Function(MessageStreamEventType type)? messageStop, + TResult Function(@JsonKey(name: 'content_block') Block contentBlock, + int index, MessageStreamEventType type)? + contentBlockStart, + TResult Function(BlockDelta delta, int index, MessageStreamEventType type)? + contentBlockDelta, + TResult Function(int index, MessageStreamEventType type)? contentBlockStop, + TResult Function(MessageStreamEventType type)? ping, + required TResult orElse(), + }) { + if (ping != null) { + return ping(type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(MessageStartEvent value) messageStart, + required TResult Function(MessageDeltaEvent value) messageDelta, + required TResult Function(MessageStopEvent value) messageStop, + required TResult Function(ContentBlockStartEvent value) contentBlockStart, + required TResult Function(ContentBlockDeltaEvent value) contentBlockDelta, + required TResult Function(ContentBlockStopEvent value) contentBlockStop, + required TResult Function(PingEvent value) ping, + }) { + return ping(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageStartEvent value)? messageStart, + TResult? Function(MessageDeltaEvent value)? messageDelta, + TResult? Function(MessageStopEvent value)? messageStop, + TResult? Function(ContentBlockStartEvent value)? contentBlockStart, + TResult? Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult? Function(ContentBlockStopEvent value)? contentBlockStop, + TResult? Function(PingEvent value)? ping, + }) { + return ping?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageStartEvent value)? messageStart, + TResult Function(MessageDeltaEvent value)? messageDelta, + TResult Function(MessageStopEvent value)? messageStop, + TResult Function(ContentBlockStartEvent value)? contentBlockStart, + TResult Function(ContentBlockDeltaEvent value)? contentBlockDelta, + TResult Function(ContentBlockStopEvent value)? contentBlockStop, + TResult Function(PingEvent value)? ping, + required TResult orElse(), + }) { + if (ping != null) { + return ping(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$PingEventImplToJson( + this, + ); + } +} + +abstract class PingEvent extends MessageStreamEvent { + const factory PingEvent({required final MessageStreamEventType type}) = + _$PingEventImpl; + const PingEvent._() : super._(); + + factory PingEvent.fromJson(Map json) = + _$PingEventImpl.fromJson; + + @override + + /// The type of a streaming event. 
+ MessageStreamEventType get type; + @override + @JsonKey(ignore: true) + _$$PingEventImplCopyWith<_$PingEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BlockDelta _$BlockDeltaFromJson(Map json) { + switch (json['type']) { + case 'text_delta': + return TextBlockDelta.fromJson(json); + case 'input_json_delta': + return InputJsonBlockDelta.fromJson(json); + + default: + throw CheckedFromJsonException( + json, 'type', 'BlockDelta', 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$BlockDelta { + /// The type of content block. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) textDelta, + required TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? textDelta, + TResult? Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? textDelta, + TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? inputJsonDelta, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $BlockDeltaCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BlockDeltaCopyWith<$Res> { + factory $BlockDeltaCopyWith( + BlockDelta value, $Res Function(BlockDelta) then) = + _$BlockDeltaCopyWithImpl<$Res, BlockDelta>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$BlockDeltaCopyWithImpl<$Res, $Val extends BlockDelta> + implements $BlockDeltaCopyWith<$Res> { + _$BlockDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$TextBlockDeltaImplCopyWith<$Res> + implements $BlockDeltaCopyWith<$Res> { + factory _$$TextBlockDeltaImplCopyWith(_$TextBlockDeltaImpl value, + $Res Function(_$TextBlockDeltaImpl) then) = + __$$TextBlockDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String text, String type}); +} + +/// @nodoc +class __$$TextBlockDeltaImplCopyWithImpl<$Res> + extends _$BlockDeltaCopyWithImpl<$Res, _$TextBlockDeltaImpl> + implements _$$TextBlockDeltaImplCopyWith<$Res> { + __$$TextBlockDeltaImplCopyWithImpl( + _$TextBlockDeltaImpl _value, $Res Function(_$TextBlockDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? text = null, + Object? type = null, + }) { + return _then(_$TextBlockDeltaImpl( + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$TextBlockDeltaImpl extends TextBlockDelta { + const _$TextBlockDeltaImpl({required this.text, required this.type}) + : super._(); + + factory _$TextBlockDeltaImpl.fromJson(Map json) => + _$$TextBlockDeltaImplFromJson(json); + + /// The text delta. + @override + final String text; + + /// The type of content block. + @override + final String type; + + @override + String toString() { + return 'BlockDelta.textDelta(text: $text, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$TextBlockDeltaImpl && + (identical(other.text, text) || other.text == text) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, text, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + __$$TextBlockDeltaImplCopyWithImpl<_$TextBlockDeltaImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) textDelta, + required TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, + }) { + return textDelta(text, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? textDelta, + TResult? Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + }) { + return textDelta?.call(text, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? textDelta, + TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + required TResult orElse(), + }) { + if (textDelta != null) { + return textDelta(text, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, + }) { + return textDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? 
inputJsonDelta, + }) { + return textDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? inputJsonDelta, + required TResult orElse(), + }) { + if (textDelta != null) { + return textDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$TextBlockDeltaImplToJson( + this, + ); + } +} + +abstract class TextBlockDelta extends BlockDelta { + const factory TextBlockDelta( + {required final String text, + required final String type}) = _$TextBlockDeltaImpl; + const TextBlockDelta._() : super._(); + + factory TextBlockDelta.fromJson(Map json) = + _$TextBlockDeltaImpl.fromJson; + + /// The text delta. + String get text; + @override + + /// The type of content block. + String get type; + @override + @JsonKey(ignore: true) + _$$TextBlockDeltaImplCopyWith<_$TextBlockDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$InputJsonBlockDeltaImplCopyWith<$Res> + implements $BlockDeltaCopyWith<$Res> { + factory _$$InputJsonBlockDeltaImplCopyWith(_$InputJsonBlockDeltaImpl value, + $Res Function(_$InputJsonBlockDeltaImpl) then) = + __$$InputJsonBlockDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'partial_json', includeIfNull: false) String? partialJson, + String type}); +} + +/// @nodoc +class __$$InputJsonBlockDeltaImplCopyWithImpl<$Res> + extends _$BlockDeltaCopyWithImpl<$Res, _$InputJsonBlockDeltaImpl> + implements _$$InputJsonBlockDeltaImplCopyWith<$Res> { + __$$InputJsonBlockDeltaImplCopyWithImpl(_$InputJsonBlockDeltaImpl _value, + $Res Function(_$InputJsonBlockDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? partialJson = freezed, + Object? type = null, + }) { + return _then(_$InputJsonBlockDeltaImpl( + partialJson: freezed == partialJson + ? _value.partialJson + : partialJson // ignore: cast_nullable_to_non_nullable + as String?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$InputJsonBlockDeltaImpl extends InputJsonBlockDelta { + const _$InputJsonBlockDeltaImpl( + {@JsonKey(name: 'partial_json', includeIfNull: false) this.partialJson, + required this.type}) + : super._(); + + factory _$InputJsonBlockDeltaImpl.fromJson(Map json) => + _$$InputJsonBlockDeltaImplFromJson(json); + + /// The partial JSON delta. + @override + @JsonKey(name: 'partial_json', includeIfNull: false) + final String? partialJson; + + /// The type of content block. 
+ @override + final String type; + + @override + String toString() { + return 'BlockDelta.inputJsonDelta(partialJson: $partialJson, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$InputJsonBlockDeltaImpl && + (identical(other.partialJson, partialJson) || + other.partialJson == partialJson) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, partialJson, type); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => + __$$InputJsonBlockDeltaImplCopyWithImpl<_$InputJsonBlockDeltaImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String text, String type) textDelta, + required TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type) + inputJsonDelta, + }) { + return inputJsonDelta(partialJson, type); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String text, String type)? textDelta, + TResult? Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + }) { + return inputJsonDelta?.call(partialJson, type); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String text, String type)? textDelta, + TResult Function( + @JsonKey(name: 'partial_json', includeIfNull: false) + String? partialJson, + String type)? + inputJsonDelta, + required TResult orElse(), + }) { + if (inputJsonDelta != null) { + return inputJsonDelta(partialJson, type); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(TextBlockDelta value) textDelta, + required TResult Function(InputJsonBlockDelta value) inputJsonDelta, + }) { + return inputJsonDelta(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(TextBlockDelta value)? textDelta, + TResult? Function(InputJsonBlockDelta value)? inputJsonDelta, + }) { + return inputJsonDelta?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(TextBlockDelta value)? textDelta, + TResult Function(InputJsonBlockDelta value)? inputJsonDelta, + required TResult orElse(), + }) { + if (inputJsonDelta != null) { + return inputJsonDelta(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$InputJsonBlockDeltaImplToJson( + this, + ); + } +} + +abstract class InputJsonBlockDelta extends BlockDelta { + const factory InputJsonBlockDelta( + {@JsonKey(name: 'partial_json', includeIfNull: false) + final String? partialJson, + required final String type}) = _$InputJsonBlockDeltaImpl; + const InputJsonBlockDelta._() : super._(); + + factory InputJsonBlockDelta.fromJson(Map json) = + _$InputJsonBlockDeltaImpl.fromJson; + + /// The partial JSON delta. + @JsonKey(name: 'partial_json', includeIfNull: false) + String? get partialJson; + @override + + /// The type of content block. 
+ String get type; + @override + @JsonKey(ignore: true) + _$$InputJsonBlockDeltaImplCopyWith<_$InputJsonBlockDeltaImpl> get copyWith => + throw _privateConstructorUsedError; +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..dc8d9833 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,558 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$CreateMessageRequestImpl _$$CreateMessageRequestImplFromJson( + Map json) => + _$CreateMessageRequestImpl( + model: const _ModelConverter().fromJson(json['model']), + messages: (json['messages'] as List) + .map((e) => Message.fromJson(e as Map)) + .toList(), + maxTokens: (json['max_tokens'] as num).toInt(), + metadata: json['metadata'] == null + ? null + : CreateMessageRequestMetadata.fromJson( + json['metadata'] as Map), + stopSequences: (json['stop_sequences'] as List?) + ?.map((e) => e as String) + .toList(), + system: json['system'] as String?, + temperature: (json['temperature'] as num?)?.toDouble(), + toolChoice: json['tool_choice'] == null + ? null + : ToolChoice.fromJson(json['tool_choice'] as Map), + tools: (json['tools'] as List?) + ?.map((e) => Tool.fromJson(e as Map)) + .toList(), + topK: (json['top_k'] as num?)?.toInt(), + topP: (json['top_p'] as num?)?.toDouble(), + stream: json['stream'] as bool? ?? 
false, + ); + +Map _$$CreateMessageRequestImplToJson( + _$CreateMessageRequestImpl instance) { + final val = { + 'model': const _ModelConverter().toJson(instance.model), + 'messages': instance.messages.map((e) => e.toJson()).toList(), + 'max_tokens': instance.maxTokens, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('metadata', instance.metadata?.toJson()); + writeNotNull('stop_sequences', instance.stopSequences); + writeNotNull('system', instance.system); + writeNotNull('temperature', instance.temperature); + writeNotNull('tool_choice', instance.toolChoice?.toJson()); + writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); + writeNotNull('top_k', instance.topK); + writeNotNull('top_p', instance.topP); + val['stream'] = instance.stream; + return val; +} + +_$ModelCatalogImpl _$$ModelCatalogImplFromJson(Map json) => + _$ModelCatalogImpl( + $enumDecode(_$ModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$ModelCatalogImplToJson(_$ModelCatalogImpl instance) => + { + 'value': _$ModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$ModelsEnumMap = { + Models.claude35Sonnet20240620: 'claude-3-5-sonnet-20240620', + Models.claude3Haiku20240307: 'claude-3-haiku-20240307', + Models.claude3Opus20240229: 'claude-3-opus-20240229', + Models.claude3Sonnet20240229: 'claude-3-sonnet-20240229', + Models.claude20: 'claude-2.0', + Models.claude21: 'claude-2.1', + Models.claudeInstant12: 'claude-instant-1.2', +}; + +_$ModelIdImpl _$$ModelIdImplFromJson(Map json) => + _$ModelIdImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$ModelIdImplToJson(_$ModelIdImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$CreateMessageRequestMetadataImpl _$$CreateMessageRequestMetadataImplFromJson( + Map json) => + _$CreateMessageRequestMetadataImpl( + userId: json['user_id'] as String?, + ); + +Map _$$CreateMessageRequestMetadataImplToJson( + _$CreateMessageRequestMetadataImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('user_id', instance.userId); + return val; +} + +_$ToolChoiceImpl _$$ToolChoiceImplFromJson(Map json) => + _$ToolChoiceImpl( + type: $enumDecode(_$ToolChoiceTypeEnumMap, json['type']), + name: json['name'] as String?, + ); + +Map _$$ToolChoiceImplToJson(_$ToolChoiceImpl instance) { + final val = { + 'type': _$ToolChoiceTypeEnumMap[instance.type]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('name', instance.name); + return val; +} + +const _$ToolChoiceTypeEnumMap = { + ToolChoiceType.auto: 'auto', + ToolChoiceType.any: 'any', + ToolChoiceType.tool: 'tool', +}; + +_$MessageImpl _$$MessageImplFromJson(Map json) => + _$MessageImpl( + id: json['id'] as String?, + content: const _MessageContentConverter().fromJson(json['content']), + role: $enumDecode(_$MessageRoleEnumMap, json['role']), + model: json['model'] as String?, + stopReason: $enumDecodeNullable(_$StopReasonEnumMap, json['stop_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + stopSequence: json['stop_sequence'] as String?, + type: json['type'] as String?, + usage: json['usage'] == null + ? 
null + : Usage.fromJson(json['usage'] as Map), + ); + +Map _$$MessageImplToJson(_$MessageImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('id', instance.id); + val['content'] = const _MessageContentConverter().toJson(instance.content); + val['role'] = _$MessageRoleEnumMap[instance.role]!; + writeNotNull('model', instance.model); + writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); + writeNotNull('stop_sequence', instance.stopSequence); + writeNotNull('type', instance.type); + writeNotNull('usage', instance.usage?.toJson()); + return val; +} + +const _$MessageRoleEnumMap = { + MessageRole.user: 'user', + MessageRole.assistant: 'assistant', +}; + +const _$StopReasonEnumMap = { + StopReason.endTurn: 'end_turn', + StopReason.maxTokens: 'max_tokens', + StopReason.stopSequence: 'stop_sequence', + StopReason.toolUse: 'tool_use', +}; + +_$MessageContentBlocksImpl _$$MessageContentBlocksImplFromJson( + Map json) => + _$MessageContentBlocksImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$MessageContentBlocksImplToJson( + _$MessageContentBlocksImpl instance) => + { + 'value': instance.value.map((e) => e.toJson()).toList(), + 'runtimeType': instance.$type, + }; + +_$MessageContentTextImpl _$$MessageContentTextImplFromJson( + Map json) => + _$MessageContentTextImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$MessageContentTextImplToJson( + _$MessageContentTextImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( + name: json['name'] as String, + description: json['description'] as String?, + inputSchema: json['input_schema'] as Map, + ); + +Map _$$ToolImplToJson(_$ToolImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['input_schema'] = instance.inputSchema; + return val; +} + +_$ImageBlockSourceImpl _$$ImageBlockSourceImplFromJson( + Map json) => + _$ImageBlockSourceImpl( + data: json['data'] as String, + mediaType: + $enumDecode(_$ImageBlockSourceMediaTypeEnumMap, json['media_type']), + type: $enumDecode(_$ImageBlockSourceTypeEnumMap, json['type']), + ); + +Map _$$ImageBlockSourceImplToJson( + _$ImageBlockSourceImpl instance) => + { + 'data': instance.data, + 'media_type': _$ImageBlockSourceMediaTypeEnumMap[instance.mediaType]!, + 'type': _$ImageBlockSourceTypeEnumMap[instance.type]!, + }; + +const _$ImageBlockSourceMediaTypeEnumMap = { + ImageBlockSourceMediaType.imageJpeg: 'image/jpeg', + ImageBlockSourceMediaType.imagePng: 'image/png', + ImageBlockSourceMediaType.imageGif: 'image/gif', + ImageBlockSourceMediaType.imageWebp: 'image/webp', +}; + +const _$ImageBlockSourceTypeEnumMap = { + ImageBlockSourceType.base64: 'base64', +}; + +_$UsageImpl _$$UsageImplFromJson(Map json) => _$UsageImpl( + inputTokens: (json['input_tokens'] as num).toInt(), + outputTokens: (json['output_tokens'] as num).toInt(), + ); + +Map _$$UsageImplToJson(_$UsageImpl instance) => + { + 'input_tokens': instance.inputTokens, + 'output_tokens': instance.outputTokens, + }; + +_$MessageDeltaImpl _$$MessageDeltaImplFromJson(Map json) => + _$MessageDeltaImpl( + stopReason: $enumDecodeNullable(_$StopReasonEnumMap, 
json['stop_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + stopSequence: json['stop_sequence'] as String?, + ); + +Map _$$MessageDeltaImplToJson(_$MessageDeltaImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('stop_reason', _$StopReasonEnumMap[instance.stopReason]); + writeNotNull('stop_sequence', instance.stopSequence); + return val; +} + +_$MessageDeltaUsageImpl _$$MessageDeltaUsageImplFromJson( + Map json) => + _$MessageDeltaUsageImpl( + outputTokens: (json['output_tokens'] as num).toInt(), + ); + +Map _$$MessageDeltaUsageImplToJson( + _$MessageDeltaUsageImpl instance) => + { + 'output_tokens': instance.outputTokens, + }; + +_$TextBlockImpl _$$TextBlockImplFromJson(Map json) => + _$TextBlockImpl( + text: json['text'] as String, + type: json['type'] as String? ?? 'text', + ); + +Map _$$TextBlockImplToJson(_$TextBlockImpl instance) => + { + 'text': instance.text, + 'type': instance.type, + }; + +_$ImageBlockImpl _$$ImageBlockImplFromJson(Map json) => + _$ImageBlockImpl( + source: ImageBlockSource.fromJson(json['source'] as Map), + type: json['type'] as String? ?? 'image', + ); + +Map _$$ImageBlockImplToJson(_$ImageBlockImpl instance) => + { + 'source': instance.source.toJson(), + 'type': instance.type, + }; + +_$ToolUseBlockImpl _$$ToolUseBlockImplFromJson(Map json) => + _$ToolUseBlockImpl( + id: json['id'] as String, + name: json['name'] as String, + input: json['input'] as Map, + type: json['type'] as String? ?? 'tool_use', + ); + +Map _$$ToolUseBlockImplToJson(_$ToolUseBlockImpl instance) => + { + 'id': instance.id, + 'name': instance.name, + 'input': instance.input, + 'type': instance.type, + }; + +_$ToolResultBlockImpl _$$ToolResultBlockImplFromJson( + Map json) => + _$ToolResultBlockImpl( + toolUseId: json['tool_use_id'] as String, + content: + const _ToolResultBlockContentConverter().fromJson(json['content']), + isError: json['is_error'] as bool?, + type: json['type'] as String? ?? 
'tool_result', + ); + +Map _$$ToolResultBlockImplToJson( + _$ToolResultBlockImpl instance) { + final val = { + 'tool_use_id': instance.toolUseId, + 'content': + const _ToolResultBlockContentConverter().toJson(instance.content), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('is_error', instance.isError); + val['type'] = instance.type; + return val; +} + +_$ToolResultBlockContentBlocksImpl _$$ToolResultBlockContentBlocksImplFromJson( + Map json) => + _$ToolResultBlockContentBlocksImpl( + (json['value'] as List) + .map((e) => Block.fromJson(e as Map)) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentBlocksImplToJson( + _$ToolResultBlockContentBlocksImpl instance) => + { + 'value': instance.value.map((e) => e.toJson()).toList(), + 'runtimeType': instance.$type, + }; + +_$ToolResultBlockContentTextImpl _$$ToolResultBlockContentTextImplFromJson( + Map json) => + _$ToolResultBlockContentTextImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$ToolResultBlockContentTextImplToJson( + _$ToolResultBlockContentTextImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$MessageStartEventImpl _$$MessageStartEventImplFromJson( + Map json) => + _$MessageStartEventImpl( + message: Message.fromJson(json['message'] as Map), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$MessageStartEventImplToJson( + _$MessageStartEventImpl instance) => + { + 'message': instance.message.toJson(), + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +const _$MessageStreamEventTypeEnumMap = { + MessageStreamEventType.messageStart: 'message_start', + MessageStreamEventType.messageDelta: 'message_delta', + MessageStreamEventType.messageStop: 'message_stop', + MessageStreamEventType.contentBlockStart: 'content_block_start', + MessageStreamEventType.contentBlockDelta: 'content_block_delta', + MessageStreamEventType.contentBlockStop: 'content_block_stop', + MessageStreamEventType.ping: 'ping', +}; + +_$MessageDeltaEventImpl _$$MessageDeltaEventImplFromJson( + Map json) => + _$MessageDeltaEventImpl( + delta: MessageDelta.fromJson(json['delta'] as Map), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + usage: MessageDeltaUsage.fromJson(json['usage'] as Map), + ); + +Map _$$MessageDeltaEventImplToJson( + _$MessageDeltaEventImpl instance) => + { + 'delta': instance.delta.toJson(), + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + 'usage': instance.usage.toJson(), + }; + +_$MessageStopEventImpl _$$MessageStopEventImplFromJson( + Map json) => + _$MessageStopEventImpl( + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$MessageStopEventImplToJson( + _$MessageStopEventImpl instance) => + { + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$ContentBlockStartEventImpl _$$ContentBlockStartEventImplFromJson( + Map json) => + _$ContentBlockStartEventImpl( + contentBlock: + Block.fromJson(json['content_block'] as Map), + index: (json['index'] as num).toInt(), + type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']), + ); + +Map _$$ContentBlockStartEventImplToJson( + _$ContentBlockStartEventImpl instance) => + { + 'content_block': instance.contentBlock.toJson(), + 'index': instance.index, + 'type': _$MessageStreamEventTypeEnumMap[instance.type]!, + }; + +_$ContentBlockDeltaEventImpl _$$ContentBlockDeltaEventImplFromJson( 
+        Map<String, dynamic> json) =>
+    _$ContentBlockDeltaEventImpl(
+      delta: BlockDelta.fromJson(json['delta'] as Map<String, dynamic>),
+      index: (json['index'] as num).toInt(),
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$ContentBlockDeltaEventImplToJson(
+        _$ContentBlockDeltaEventImpl instance) =>
+    <String, dynamic>{
+      'delta': instance.delta.toJson(),
+      'index': instance.index,
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
+
+_$ContentBlockStopEventImpl _$$ContentBlockStopEventImplFromJson(
+        Map<String, dynamic> json) =>
+    _$ContentBlockStopEventImpl(
+      index: (json['index'] as num).toInt(),
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$ContentBlockStopEventImplToJson(
+        _$ContentBlockStopEventImpl instance) =>
+    <String, dynamic>{
+      'index': instance.index,
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
+
+_$PingEventImpl _$$PingEventImplFromJson(Map<String, dynamic> json) =>
+    _$PingEventImpl(
+      type: $enumDecode(_$MessageStreamEventTypeEnumMap, json['type']),
+    );
+
+Map<String, dynamic> _$$PingEventImplToJson(_$PingEventImpl instance) =>
+    <String, dynamic>{
+      'type': _$MessageStreamEventTypeEnumMap[instance.type]!,
+    };
+
+_$TextBlockDeltaImpl _$$TextBlockDeltaImplFromJson(Map<String, dynamic> json) =>
+    _$TextBlockDeltaImpl(
+      text: json['text'] as String,
+      type: json['type'] as String,
+    );
+
+Map<String, dynamic> _$$TextBlockDeltaImplToJson(
+        _$TextBlockDeltaImpl instance) =>
+    <String, dynamic>{
+      'text': instance.text,
+      'type': instance.type,
+    };
+
+_$InputJsonBlockDeltaImpl _$$InputJsonBlockDeltaImplFromJson(
+        Map<String, dynamic> json) =>
+    _$InputJsonBlockDeltaImpl(
+      partialJson: json['partial_json'] as String?,
+      type: json['type'] as String,
+    );
+
+Map<String, dynamic> _$$InputJsonBlockDeltaImplToJson(
+    _$InputJsonBlockDeltaImpl instance) {
+  final val = <String, dynamic>{};
+
+  void writeNotNull(String key, dynamic value) {
+    if (value != null) {
+      val[key] = value;
+    }
+  }
+
+  writeNotNull('partial_json', instance.partialJson);
+  val['type'] = instance.type;
+  return val;
+}
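For orientation while reading the generated event classes above, here is a minimal consumption sketch (not part of this diff): it folds a stream of `MessageStreamEvent` values into plain text using the generated `maybeWhen` matcher. The `events` stream is assumed to come from the package's streaming API.

// Illustrative sketch only; assumes `events` is produced by the package's
// streaming Messages API (not shown in this diff).
Future<String> collectText(Stream<MessageStreamEvent> events) async {
  final buffer = StringBuffer();
  await for (final event in events) {
    event.maybeWhen(
      // Text arrives incrementally in content_block_delta events.
      contentBlockDelta: (delta, index, type) {
        if (delta is TextBlockDelta) {
          buffer.write(delta.text);
        }
      },
      // message_start, message_delta, ping, etc. are ignored here.
      orElse: () {},
    );
  }
  return buffer.toString();
}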
+enum StopReason { + @JsonValue('end_turn') + endTurn, + @JsonValue('max_tokens') + maxTokens, + @JsonValue('stop_sequence') + stopSequence, + @JsonValue('tool_use') + toolUse, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart new file mode 100644 index 00000000..578701a9 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool.dart @@ -0,0 +1,59 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Tool +// ========================================== + +/// A tool the model may use. +@freezed +class Tool with _$Tool { + const Tool._(); + + /// Factory constructor for Tool + const factory Tool({ + /// The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + required String name, + + /// Description of what this tool does. + /// + /// Tool descriptions should be as detailed as possible. The more information that + /// the model has about what the tool is and how to use it, the better it will + /// perform. You can use natural language descriptions to reinforce important + /// aspects of the tool input JSON schema. + @JsonKey(includeIfNull: false) String? description, + + /// [JSON schema](https://json-schema.org/) for this tool's input. + /// + /// This defines the shape of the `input` that your tool accepts and that the model + /// will produce. + @JsonKey(name: 'input_schema') required Map inputSchema, + }) = _Tool; + + /// Object construction from a JSON representation + factory Tool.fromJson(Map json) => _$ToolFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'input_schema' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'input_schema': inputSchema, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart new file mode 100644 index 00000000..cb3d65eb --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: ToolChoice +// ========================================== + +/// How the model should use the provided tools. The model can use a specific tool, +/// any available tool, or decide by itself. +/// +/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. +/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. +/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. +@freezed +class ToolChoice with _$ToolChoice { + const ToolChoice._(); + + /// Factory constructor for ToolChoice + const factory ToolChoice({ + /// How the model should use the provided tools. The model can use a specific tool, + /// any available tool, or decide by itself. 
+ /// + /// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + /// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + /// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + required ToolChoiceType type, + + /// The name of the tool to use. + @JsonKey(includeIfNull: false) String? name, + }) = _ToolChoice; + + /// Object construction from a JSON representation + factory ToolChoice.fromJson(Map json) => + _$ToolChoiceFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'name']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'name': name, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart new file mode 100644 index 00000000..22b88c4d --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/tool_choice_type.dart @@ -0,0 +1,24 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// ENUM: ToolChoiceType +// ========================================== + +/// How the model should use the provided tools. The model can use a specific tool, +/// any available tool, or decide by itself. +/// +/// - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. +/// - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. +/// - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. +enum ToolChoiceType { + @JsonValue('auto') + auto, + @JsonValue('any') + any, + @JsonValue('tool') + tool, +} diff --git a/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart new file mode 100644 index 00000000..37f3d39d --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/generated/schema/usage.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of anthropic_schema; + +// ========================================== +// CLASS: Usage +// ========================================== + +/// Billing and rate-limit usage. +/// +/// Anthropic's API bills and rate-limits by token counts, as tokens represent the +/// underlying cost to our systems. +/// +/// Under the hood, the API transforms requests into a format suitable for the +/// model. The model's output then goes through a parsing stage before becoming an +/// API response. As a result, the token counts in `usage` will not match one-to-one +/// with the exact visible content of an API request or response. +/// +/// For example, `output_tokens` will be non-zero, even for an empty string response +/// from Claude. +@freezed +class Usage with _$Usage { + const Usage._(); + + /// Factory constructor for Usage + const factory Usage({ + /// The number of input tokens which were used. 
+ @JsonKey(name: 'input_tokens') required int inputTokens, + + /// The number of output tokens which were used. + @JsonKey(name: 'output_tokens') required int outputTokens, + }) = _Usage; + + /// Object construction from a JSON representation + factory Usage.fromJson(Map json) => _$UsageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['input_tokens', 'output_tokens']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'input_tokens': inputTokens, + 'output_tokens': outputTokens, + }; + } +} diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart new file mode 100644 index 00000000..0ad0b2fc --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client.dart @@ -0,0 +1,3 @@ +export 'http_client_stub.dart' + if (dart.library.io) 'http_client_io.dart' + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart new file mode 100644 index 00000000..59abc229 --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_html.dart @@ -0,0 +1,18 @@ +import 'package:fetch_client/fetch_client.dart' as fetch; +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; + +/// Creates an IOClient with a retry policy. +http.Client createDefaultHttpClient() { + return RetryClient(fetch.FetchClient(mode: fetch.RequestMode.cors)); +} + +/// Middleware for HTTP requests. +Future onRequestHandler(final http.BaseRequest request) { + // If the request if bigger than 60KiB set persistentConnection to false + // Ref: https://github.com/Zekfad/fetch_client#large-payload + if ((request.contentLength ?? 0) > 61440) { + request.persistentConnection = false; + } + return Future.value(request); +} diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart new file mode 100644 index 00000000..0b24e7db --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_io.dart @@ -0,0 +1,12 @@ +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; + +/// Creates an IOClient with a retry policy. +http.Client createDefaultHttpClient() { + return RetryClient(http.Client()); +} + +/// Middleware for HTTP requests. +Future onRequestHandler(final http.BaseRequest request) { + return Future.value(request); +} diff --git a/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart new file mode 100644 index 00000000..2668d1ac --- /dev/null +++ b/packages/anthropic_sdk_dart/lib/src/http_client/http_client_stub.dart @@ -0,0 +1,10 @@ +import 'package:http/http.dart' as http; + +/// Creates a default HTTP client for the current platform. +http.Client createDefaultHttpClient() => throw UnsupportedError( + 'Cannot create a client without dart:html or dart:io.', + ); + +/// Middleware for HTTP requests. 
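A minimal sketch of defining a Tool and forcing the model to use it via ToolChoice, mirroring the tool-use test further down in this patch (the weather tool, its schema, and the prompt are illustrative):

```dart
import 'dart:io';

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

Future<void> main() async {
  final client = AnthropicClient(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
  );

  // Illustrative tool; inputSchema is a plain JSON-schema map.
  const tool = Tool(
    name: 'get_current_weather',
    description: 'Get the current weather in a given location',
    inputSchema: {
      'type': 'object',
      'properties': {
        'location': {
          'type': 'string',
          'description': 'The city and state, e.g. San Francisco, CA',
        },
      },
      'required': ['location'],
    },
  );

  final res = await client.createMessage(
    request: CreateMessageRequest(
      model: const Model.model(Models.claude35Sonnet20240620),
      maxTokens: 1024,
      messages: const [
        Message(
          role: MessageRole.user,
          content: MessageContent.text(
            'What is the weather like in Boston right now?',
          ),
        ),
      ],
      tools: [tool],
      // Force the model to answer with a tool_use block for this tool.
      toolChoice: ToolChoice(type: ToolChoiceType.tool, name: tool.name),
    ),
  );

  // With a forced tool choice, the first content block is a tool_use block.
  final toolUse = res.content.blocks.first as ToolUseBlock;
  print('${toolUse.name}(${toolUse.input})');

  client.endSession();
}
```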
+Future onRequestHandler(final http.BaseRequest request) => + throw UnsupportedError('stub'); diff --git a/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml new file mode 100644 index 00000000..5ad1f3db --- /dev/null +++ b/packages/anthropic_sdk_dart/oas/anthropic_openapi_curated.yaml @@ -0,0 +1,778 @@ +openapi: 3.0.3 + +info: + title: Anthropic API + description: API Spec for Anthropic API. Please see https://docs.anthropic.com/en/api for more details. + version: "1" + +servers: + - url: https://api.anthropic.com/v1 + +tags: + - name: Messages + description: Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation. + +paths: + /messages: + post: + operationId: createMessage + tags: + - Messages + summary: Create a Message + description: | + Send a structured list of input messages with text and/or image content, and the + model will generate the next message in the conversation. + + The Messages API can be used for either single queries or stateless multi-turn + conversations. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Message" +components: + securitySchemes: + ApiKeyAuth: + type: apiKey + in: header + name: x-api-key + + schemas: + CreateMessageRequest: + type: object + description: The request parameters for creating a message. + properties: + model: + title: Model + description: | + The model that will complete your prompt. + + See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + example: "claude-3-5-sonnet-20240620" + anyOf: + - type: string + description: The ID of the model to use for this request. + - type: string + title: Models + description: | + Available models. Mind that the list may not be exhaustive nor up-to-date. + enum: + - claude-3-5-sonnet-20240620 + - claude-3-haiku-20240307 + - claude-3-opus-20240229 + - claude-3-sonnet-20240229 + - claude-2.0 + - claude-2.1 + - claude-instant-1.2 + messages: + type: array + description: | + Input messages. + + Our models are trained to operate on alternating `user` and `assistant` + conversational turns. When creating a new `Message`, you specify the prior + conversational turns with the `messages` parameter, and the model then generates + the next `Message` in the conversation. + + Each input message must be an object with a `role` and `content`. You can + specify a single `user`-role message, or you can include multiple `user` and + `assistant` messages. The first message must always use the `user` role. + + If the final message uses the `assistant` role, the response content will + continue immediately from the content in that message. This can be used to + constrain part of the model's response. + + See [message content](https://docs.anthropic.com/en/api/messages-content) for + details on how to construct valid message objects. + + Example with a single `user` message: + + ```json + [{ "role": "user", "content": "Hello, Claude" }] + ``` + + Example with multiple conversational turns: + + ```json + [ + { "role": "user", "content": "Hello there." }, + { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + { "role": "user", "content": "Can you explain LLMs in plain English?" 
} + ] + ``` + + Example with a partially-filled response from Claude: + + ```json + [ + { + "role": "user", + "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + }, + { "role": "assistant", "content": "The best answer is (" } + ] + ``` + + Each input message `content` may be either a single `string` or an array of + content blocks, where each block has a specific `type`. Using a `string` for + `content` is shorthand for an array of one content block of type `"text"`. The + following input messages are equivalent: + + ```json + { "role": "user", "content": "Hello, Claude" } + ``` + + ```json + { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + ``` + + Starting with Claude 3 models, you can also send image content blocks: + + ```json + { + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/jpeg", + "data": "/9j/4AAQSkZJRg..." + } + }, + { "type": "text", "text": "What is in this image?" } + ] + } + ``` + + We currently support the `base64` source type for images, and the `image/jpeg`, + `image/png`, `image/gif`, and `image/webp` media types. + + See [examples](https://docs.anthropic.com/en/api/messages-examples) for more + input examples. + + Note that if you want to include a + [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + the top-level `system` parameter — there is no `"system"` role for input + messages in the Messages API. + minItems: 1 + items: + $ref: '#/components/schemas/Message' + max_tokens: + type: integer + description: | + The maximum number of tokens to generate before stopping. + + Note that our models may stop _before_ reaching this maximum. This parameter + only specifies the absolute maximum number of tokens to generate. + + Different models have different maximum values for this parameter. See + [models](https://docs.anthropic.com/en/docs/models-overview) for details. + metadata: + $ref: '#/components/schemas/CreateMessageRequestMetadata' + stop_sequences: + type: array + description: | + Custom text sequences that will cause the model to stop generating. + + Our models will normally stop when they have naturally completed their turn, + which will result in a response `stop_reason` of `"end_turn"`. + + If you want the model to stop generating when it encounters custom strings of + text, you can use the `stop_sequences` parameter. If the model encounters one of + the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + and the response `stop_sequence` value will contain the matched stop sequence. + items: + type: string + system: + type: string + description: | + System prompt. + + A system prompt is a way of providing context and instructions to Claude, such + as specifying a particular goal or role. See our + [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + temperature: + type: number + description: | + Amount of randomness injected into the response. + + Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + for analytical / multiple choice, and closer to `1.0` for creative and + generative tasks. + + Note that even with `temperature` of `0.0`, the results will not be fully + deterministic. + tool_choice: + $ref: '#/components/schemas/ToolChoice' + tools: + type: array + description: | + Definitions of tools that the model may use. 
+ + If you include `tools` in your API request, the model may return `tool_use` + content blocks that represent the model's use of those tools. You can then run + those tools using the tool input generated by the model and then optionally + return results back to the model using `tool_result` content blocks. + + Each tool definition includes: + + - `name`: Name of the tool. + - `description`: Optional, but strongly-recommended description of the tool. + - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + shape that the model will produce in `tool_use` output content blocks. + + For example, if you defined `tools` as: + + ```json + [ + { + "name": "get_stock_price", + "description": "Get the current stock price for a given ticker symbol.", + "input_schema": { + "type": "object", + "properties": { + "ticker": { + "type": "string", + "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + } + }, + "required": ["ticker"] + } + } + ] + ``` + + And then asked the model "What's the S&P 500 at today?", the model might produce + `tool_use` content blocks in the response like this: + + ```json + [ + { + "type": "tool_use", + "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + "name": "get_stock_price", + "input": { "ticker": "^GSPC" } + } + ] + ``` + + You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + input, and return the following back to the model in a subsequent `user` + message: + + ```json + [ + { + "type": "tool_result", + "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + "content": "259.75 USD" + } + ] + ``` + + Tools can be used for workflows that include running client-side tools and + functions, or more generally whenever you want the model to produce a particular + JSON structure of output. + + See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + items: + $ref: '#/components/schemas/Tool' + top_k: + type: integer + description: | + Only sample from the top K options for each subsequent token. + + Used to remove "long tail" low probability responses. + [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + + Recommended for advanced use cases only. You usually only need to use + `temperature`. + top_p: + type: number + description: | + Use nucleus sampling. + + In nucleus sampling, we compute the cumulative distribution over all the options + for each subsequent token in decreasing probability order and cut it off once it + reaches a particular probability specified by `top_p`. You should either alter + `temperature` or `top_p`, but not both. + + Recommended for advanced use cases only. You usually only need to use + `temperature`. + stream: + type: boolean + default: false + description: | + Whether to incrementally stream the response using server-sent events. + + See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + details. + required: + - model + - messages + - max_tokens + CreateMessageRequestMetadata: + type: object + description: An object describing metadata about the request. + properties: + user_id: + type: string + description: | + An external identifier for the user who is associated with the request. + + This should be a uuid, hash value, or other opaque identifier. Anthropic may use + this id to help detect abuse. Do not include any identifying information such as + name, email address, or phone number. + ToolChoice: + type: object + description: | + How the model should use the provided tools. 
The model can use a specific tool, + any available tool, or decide by itself. + + - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + properties: + type: + $ref: "#/components/schemas/ToolChoiceType" + name: + type: string + description: The name of the tool to use. + required: + - type + ToolChoiceType: + type: string + description: | + How the model should use the provided tools. The model can use a specific tool, + any available tool, or decide by itself. + + - `auto`: allows Claude to decide whether to call any provided tools or not. This is the default value. + - `any`: tells Claude that it must use one of the provided tools, but doesn’t force a particular tool. + - `tool`: allows us to force Claude to always use a particular tool specified in the `name` field. + enum: + - auto + - any + - tool + Message: + type: object + description: A message in a chat conversation. + properties: + id: + type: string + description: | + Unique object identifier. + + The format and length of IDs may change over time. + content: + description: The content of the message. + oneOf: + - type: string + description: A single text block. + - type: array + description: An array of content blocks. + items: + $ref: "#/components/schemas/Block" + role: + $ref: "#/components/schemas/MessageRole" + model: + type: string + description: The model that handled the request. + stop_reason: + $ref: "#/components/schemas/StopReason" + stop_sequence: + type: string + description: | + Which custom stop sequence was generated, if any. + + This value will be a non-null string if one of your custom stop sequences was + generated. + type: + type: string + description: | + Object type. + + For Messages, this is always `"message"`. + usage: + $ref: "#/components/schemas/Usage" + required: + - content + - role + MessageRole: + type: string + description: The role of the messages author. + enum: + - user + - assistant + Tool: + type: object + description: A tool the model may use. + properties: + name: + type: string + description: The name of the tool. Must match the regex `^[a-zA-Z0-9_-]{1,64}$`. + description: + type: string + description: | + Description of what this tool does. + + Tool descriptions should be as detailed as possible. The more information that + the model has about what the tool is and how to use it, the better it will + perform. You can use natural language descriptions to reinforce important + aspects of the tool input JSON schema. + input_schema: + type: object + description: | + [JSON schema](https://json-schema.org/) for this tool's input. + + This defines the shape of the `input` that your tool accepts and that the model + will produce. + additionalProperties: true + required: + - name + - input_schema + Block: + description: A block of content in a message. + oneOf: + - $ref: "#/components/schemas/TextBlock" + - $ref: "#/components/schemas/ImageBlock" + - $ref: "#/components/schemas/ToolUseBlock" + - $ref: "#/components/schemas/ToolResultBlock" + discriminator: + propertyName: type + TextBlock: + type: object + description: A block of text content. + properties: + text: + type: string + description: The text content. + type: + type: string + description: The type of content block. 
+ default: text + required: + - text + ImageBlock: + type: object + description: A block of image content. + properties: + source: + $ref: "#/components/schemas/ImageBlockSource" + type: + type: string + description: The type of content block. + default: image + required: + - source + ImageBlockSource: + type: object + description: The source of an image block. + properties: + data: + type: string + description: The base64-encoded image data. + media_type: + type: string + description: The media type of the image. + enum: + - image/jpeg + - image/png + - image/gif + - image/webp + type: + type: string + description: The type of image source. + enum: + - base64 + required: + - data + - media_type + - type + ToolUseBlock: + type: object + description: The tool the model wants to use. + properties: + id: + type: string + description: | + A unique identifier for this particular tool use block. + This will be used to match up the tool results later. + example: toolu_01A09q90qw90lq917835lq9 + name: + type: string + description: The name of the tool being used. + example: get_weather + input: + type: object + description: An object containing the input being passed to the tool, conforming to the tool’s `input_schema`. + additionalProperties: true + type: + type: string + description: The type of content block. + default: tool_use + required: + - id + - name + - input + ToolResultBlock: + type: object + description: The result of using a tool. + properties: + tool_use_id: + type: string + description: The `id` of the tool use request this is a result for. + content: + description: | + The result of the tool, as a string (e.g. `"content": "15 degrees"`) + or list of nested content blocks (e.g. `"content": [{"type": "text", "text": "15 degrees"}]`). + These content blocks can use the text or image types. + oneOf: + - type: string + description: A single text block. + - type: array + description: An array of content blocks. + items: + $ref: "#/components/schemas/Block" + is_error: + type: boolean + description: Set to `true` if the tool execution resulted in an error. + type: + type: string + description: The type of content block. + default: tool_result + required: + - tool_use_id + - content + StopReason: + type: string + description: | + The reason that we stopped. + + This may be one the following values: + + - `"end_turn"`: the model reached a natural stopping point + - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + + In non-streaming mode this value is always non-null. In streaming mode, it is + null in the `message_start` event and non-null otherwise. + nullable: true + enum: + - end_turn + - max_tokens + - stop_sequence + - tool_use + Usage: + type: object + description: | + Billing and rate-limit usage. + + Anthropic's API bills and rate-limits by token counts, as tokens represent the + underlying cost to our systems. + + Under the hood, the API transforms requests into a format suitable for the + model. The model's output then goes through a parsing stage before becoming an + API response. As a result, the token counts in `usage` will not match one-to-one + with the exact visible content of an API request or response. + + For example, `output_tokens` will be non-zero, even for an empty string response + from Claude. + properties: + input_tokens: + type: integer + description: The number of input tokens which were used. 
+ output_tokens: + type: integer + description: The number of output tokens which were used. + required: + - input_tokens + - output_tokens + MessageStreamEvent: + type: object + description: A event in a streaming conversation. + oneOf: + - $ref: "#/components/schemas/MessageStartEvent" + - $ref: "#/components/schemas/MessageDeltaEvent" + - $ref: "#/components/schemas/MessageStopEvent" + - $ref: "#/components/schemas/ContentBlockStartEvent" + - $ref: "#/components/schemas/ContentBlockDeltaEvent" + - $ref: "#/components/schemas/ContentBlockStopEvent" + - $ref: "#/components/schemas/PingEvent" + discriminator: + propertyName: type + MessageStreamEventType: + type: string + description: The type of a streaming event. + enum: + - message_start + - message_delta + - message_stop + - content_block_start + - content_block_delta + - content_block_stop + - ping + MessageStartEvent: + type: object + description: A start event in a streaming conversation. + properties: + message: + $ref: "#/components/schemas/Message" + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - message + - type + MessageDeltaEvent: + type: object + description: A delta event in a streaming conversation. + properties: + delta: + $ref: "#/components/schemas/MessageDelta" + type: + $ref: "#/components/schemas/MessageStreamEventType" + usage: + $ref: "#/components/schemas/MessageDeltaUsage" + required: + - delta + - type + - usage + MessageDelta: + type: object + description: A delta in a streaming message. + properties: + stop_reason: + $ref: "#/components/schemas/StopReason" + stop_sequence: + type: string + description: | + Which custom stop sequence was generated, if any. + + This value will be a non-null string if one of your custom stop sequences was + generated. + MessageDeltaUsage: + type: object + description: | + Billing and rate-limit usage. + + Anthropic's API bills and rate-limits by token counts, as tokens represent the + underlying cost to our systems. + + Under the hood, the API transforms requests into a format suitable for the + model. The model's output then goes through a parsing stage before becoming an + API response. As a result, the token counts in `usage` will not match one-to-one + with the exact visible content of an API request or response. + + For example, `output_tokens` will be non-zero, even for an empty string response + from Claude. + properties: + output_tokens: + type: integer + description: The cumulative number of output tokens which were used. + required: + - output_tokens + MessageStopEvent: + type: object + description: A stop event in a streaming conversation. + properties: + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - type + ContentBlockStartEvent: + type: object + description: A start event in a streaming content block. + properties: + content_block: + $ref: "#/components/schemas/Block" + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - content_block + - index + - type + ContentBlockDeltaEvent: + type: object + description: A delta event in a streaming content block. + properties: + delta: + $ref: "#/components/schemas/BlockDelta" + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - delta + - index + - type + BlockDelta: + description: A delta in a streaming message. 
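A minimal sketch of consuming these streaming events through the generated client's createMessageStream, based on the streaming test further down in this patch (the prompt is illustrative):

```dart
import 'dart:io';

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

Future<void> main() async {
  final client = AnthropicClient(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
  );

  final stream = client.createMessageStream(
    request: const CreateMessageRequest(
      model: Model.model(Models.claude35Sonnet20240620),
      maxTokens: 1024,
      messages: [
        Message(
          role: MessageRole.user,
          content: MessageContent.text('Write a haiku about streams.'),
        ),
      ],
    ),
  );

  await for (final event in stream) {
    event.map(
      messageStart: (e) {
        print('message ${e.message.id} started');
      },
      contentBlockStart: (_) {},
      contentBlockDelta: (e) {
        // Text arrives incrementally as text_delta blocks.
        final text = e.delta.mapOrNull(textDelta: (d) => d.text);
        if (text != null) {
          print(text);
        }
      },
      contentBlockStop: (_) {},
      messageDelta: (e) {
        // The final delta carries the stop reason and cumulative output tokens.
        print('stop: ${e.delta.stopReason}, '
            'output tokens: ${e.usage.outputTokens}');
      },
      messageStop: (_) {},
      ping: (_) {},
    );
  }

  client.endSession();
}
```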
+ oneOf: + - $ref: "#/components/schemas/TextBlockDelta" + - $ref: "#/components/schemas/InputJsonBlockDelta" + discriminator: + propertyName: type + TextBlockDelta: + type: object + description: A delta in a streaming text block. + properties: + text: + type: string + description: The text delta. + type: + type: string + description: The type of content block. + default: text_delta + required: + - text + - type + InputJsonBlockDelta: + type: object + description: A delta in a streaming input JSON. + properties: + partial_json: + type: string + description: The partial JSON delta. + type: + type: string + description: The type of content block. + default: input_json_delta + required: + - text + - type + ContentBlockStopEvent: + type: object + description: A stop event in a streaming content block. + properties: + index: + type: integer + description: The index of the content block. + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - index + - type + PingEvent: + type: object + description: A ping event in a streaming conversation. + properties: + type: + $ref: "#/components/schemas/MessageStreamEventType" + required: + - type + +security: + - ApiKeyAuth: [ ] diff --git a/packages/anthropic_sdk_dart/oas/main.dart b/packages/anthropic_sdk_dart/oas/main.dart new file mode 100644 index 00000000..316cc26c --- /dev/null +++ b/packages/anthropic_sdk_dart/oas/main.dart @@ -0,0 +1,60 @@ +import 'dart:io'; + +import 'package:openapi_spec/openapi_spec.dart'; + +/// Generates Anthropic API client Dart code from the OpenAPI spec. +/// https://docs.anthropic.com/en/api +void main() async { + final spec = OpenApi.fromFile(source: 'oas/anthropic_openapi_curated.yaml'); + + await spec.generate( + package: 'Anthropic', + destination: 'lib/src/generated/', + replace: true, + schemaOptions: const SchemaGeneratorOptions( + onSchemaName: _onSchemaName, + onSchemaUnionFactoryName: _onSchemaUnionFactoryName, + ), + clientOptions: const ClientGeneratorOptions( + enabled: true, + ), + ); + + await Process.run( + 'dart', + ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], + ); +} + +String? _onSchemaName(final String schemaName) => switch (schemaName) { + 'ModelEnumeration' => 'ModelCatalog', + 'ModelString' => 'ModelId', + 'MessageContentString' => 'MessageContentText', + 'MessageContentListBlock' => 'MessageContentBlocks', + 'ToolResultBlockContentListBlock' => 'ToolResultBlockContentBlocks', + 'ToolResultBlockContentString' => 'ToolResultBlockContentText', + _ => schemaName, + }; + +String? 
_onSchemaUnionFactoryName( + final String union, + final String unionSubclass, +) => + switch (unionSubclass) { + 'ModelCatalog' => 'model', + 'ModelId' => 'modelId', + 'MessageContentText' => 'text', + 'MessageContentBlocks' => 'blocks', + 'ToolResultBlockContentBlocks' => 'blocks', + 'ToolResultBlockContentText' => 'text', + 'TextBlockDelta' => 'textDelta', + 'InputJsonBlockDelta' => 'inputJsonDelta', + 'MessageStartEvent' => 'messageStart', + 'MessageDeltaEvent' => 'messageDelta', + 'MessageStopEvent' => 'messageStop', + 'ContentBlockStartEvent' => 'contentBlockStart', + 'ContentBlockDeltaEvent' => 'contentBlockDelta', + 'ContentBlockStopEvent' => 'contentBlockStop', + 'PingEvent' => 'ping', + _ => null, + }; diff --git a/packages/anthropic_sdk_dart/pubspec.lock b/packages/anthropic_sdk_dart/pubspec.lock new file mode 100644 index 00000000..c719386a --- /dev/null +++ b/packages/anthropic_sdk_dart/pubspec.lock @@ -0,0 +1,627 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + _fe_analyzer_shared: + dependency: transitive + description: + name: _fe_analyzer_shared + sha256: "5aaf60d96c4cd00fe7f21594b5ad6a1b699c80a27420f8a837f4d68473ef09e3" + url: "https://pub.dev" + source: hosted + version: "68.0.0" + _macros: + dependency: transitive + description: dart + source: sdk + version: "0.1.0" + analyzer: + dependency: transitive + description: + name: analyzer + sha256: "21f1d3720fd1c70316399d5e2bccaebb415c434592d778cce8acb967b8578808" + url: "https://pub.dev" + source: hosted + version: "6.5.0" + args: + dependency: transitive + description: + name: args + sha256: "7cf60b9f0cc88203c5a190b4cd62a99feea42759a7fa695010eb5de1c0b2252a" + url: "https://pub.dev" + source: hosted + version: "2.5.0" + async: + dependency: transitive + description: + name: async + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + boolean_selector: + dependency: transitive + description: + name: boolean_selector + sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + build: + dependency: transitive + description: + name: build + sha256: "80184af8b6cb3e5c1c4ec6d8544d27711700bc3e6d2efad04238c7b5290889f0" + url: "https://pub.dev" + source: hosted + version: "2.4.1" + build_config: + dependency: transitive + description: + name: build_config + sha256: bf80fcfb46a29945b423bd9aad884590fb1dc69b330a4d4700cac476af1708d1 + url: "https://pub.dev" + source: hosted + version: "1.1.1" + build_daemon: + dependency: transitive + description: + name: build_daemon + sha256: "79b2aef6ac2ed00046867ed354c88778c9c0f029df8a20fe10b5436826721ef9" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + build_resolvers: + dependency: transitive + description: + name: build_resolvers + sha256: "339086358431fa15d7eca8b6a36e5d783728cf025e559b834f4609a1fcfb7b0a" + url: "https://pub.dev" + source: hosted + version: "2.4.2" + build_runner: + dependency: "direct dev" + description: + name: build_runner + sha256: "644dc98a0f179b872f612d3eb627924b578897c629788e858157fa5e704ca0c7" + url: "https://pub.dev" + source: hosted + version: "2.4.11" + build_runner_core: + dependency: transitive + description: + name: build_runner_core + sha256: e3c79f69a64bdfcd8a776a3c28db4eb6e3fb5356d013ae5eb2e52007706d5dbe + url: "https://pub.dev" + source: hosted + version: "7.3.1" + built_collection: + dependency: transitive + description: + name: built_collection + 
sha256: "376e3dd27b51ea877c28d525560790aee2e6fbb5f20e2f85d5081027d94e2100" + url: "https://pub.dev" + source: hosted + version: "5.1.1" + built_value: + dependency: transitive + description: + name: built_value + sha256: c7913a9737ee4007efedaffc968c049fd0f3d0e49109e778edc10de9426005cb + url: "https://pub.dev" + source: hosted + version: "8.9.2" + checked_yaml: + dependency: transitive + description: + name: checked_yaml + sha256: feb6bed21949061731a7a75fc5d2aa727cf160b91af9a3e464c5e3a32e28b5ff + url: "https://pub.dev" + source: hosted + version: "2.0.3" + clock: + dependency: transitive + description: + name: clock + sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + url: "https://pub.dev" + source: hosted + version: "1.1.1" + code_builder: + dependency: transitive + description: + name: code_builder + sha256: f692079e25e7869c14132d39f223f8eec9830eb76131925143b2129c4bb01b37 + url: "https://pub.dev" + source: hosted + version: "4.10.0" + collection: + dependency: transitive + description: + name: collection + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + url: "https://pub.dev" + source: hosted + version: "1.19.0" + convert: + dependency: transitive + description: + name: convert + sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592" + url: "https://pub.dev" + source: hosted + version: "3.1.1" + coverage: + dependency: transitive + description: + name: coverage + sha256: "576aaab8b1abdd452e0f656c3e73da9ead9d7880e15bdc494189d9c1a1baf0db" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + crypto: + dependency: transitive + description: + name: crypto + sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab + url: "https://pub.dev" + source: hosted + version: "3.0.3" + dart_style: + dependency: transitive + description: + name: dart_style + sha256: "99e066ce75c89d6b29903d788a7bb9369cf754f7b24bf70bf4b6d6d6b26853b9" + url: "https://pub.dev" + source: hosted + version: "2.3.6" + fetch_api: + dependency: transitive + description: + name: fetch_api + sha256: "97f46c25b480aad74f7cc2ad7ccba2c5c6f08d008e68f95c1077286ce243d0e6" + url: "https://pub.dev" + source: hosted + version: "2.2.0" + fetch_client: + dependency: "direct main" + description: + name: fetch_client + sha256: "9666ee14536778474072245ed5cba07db81ae8eb5de3b7bf4a2d1e2c49696092" + url: "https://pub.dev" + source: hosted + version: "1.1.2" + file: + dependency: transitive + description: + name: file + sha256: "5fc22d7c25582e38ad9a8515372cd9a93834027aacf1801cf01164dac0ffa08c" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: "25517a4deb0c03aa0f32fd12db525856438902d9c16536311e76cdc57b31d7d1" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + freezed: + dependency: "direct dev" + description: + name: freezed + sha256: "44c19278dd9d89292cf46e97dc0c1e52ce03275f40a97c5a348e802a924bf40e" + url: "https://pub.dev" + source: hosted + version: "2.5.7" + freezed_annotation: + dependency: "direct main" + description: + name: freezed_annotation + sha256: c2e2d632dd9b8a2b7751117abcfc2b4888ecfe181bd9fca7170d9ef02e595fe2 + url: "https://pub.dev" + source: hosted + version: "2.4.4" + frontend_server_client: + dependency: transitive + description: + name: frontend_server_client + sha256: f64a0333a82f30b0cca061bc3d143813a486dc086b574bfb233b7c1372427694 + url: "https://pub.dev" + source: hosted + version: "4.0.0" + glob: + dependency: transitive + description: + 
name: glob + sha256: "0e7014b3b7d4dac1ca4d6114f82bf1782ee86745b9b42a92c9289c23d8a0ab63" + url: "https://pub.dev" + source: hosted + version: "2.1.2" + graphs: + dependency: transitive + description: + name: graphs + sha256: "741bbf84165310a68ff28fe9e727332eef1407342fca52759cb21ad8177bb8d0" + url: "https://pub.dev" + source: hosted + version: "2.3.2" + http: + dependency: "direct main" + description: + name: http + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + url: "https://pub.dev" + source: hosted + version: "1.2.2" + http_multi_server: + dependency: transitive + description: + name: http_multi_server + sha256: "97486f20f9c2f7be8f514851703d0119c3596d14ea63227af6f7a481ef2b2f8b" + url: "https://pub.dev" + source: hosted + version: "3.2.1" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "40f592dd352890c3b60fec1b68e786cefb9603e05ff303dbc4dda49b304ecdf4" + url: "https://pub.dev" + source: hosted + version: "4.1.0" + intl: + dependency: transitive + description: + name: intl + sha256: d6f56758b7d3014a48af9701c085700aac781a92a87a62b1333b46d8879661cf + url: "https://pub.dev" + source: hosted + version: "0.19.0" + io: + dependency: transitive + description: + name: io + sha256: "2ec25704aba361659e10e3e5f5d672068d332fc8ac516421d483a11e5cbd061e" + url: "https://pub.dev" + source: hosted + version: "1.0.4" + js: + dependency: transitive + description: + name: js + sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf + url: "https://pub.dev" + source: hosted + version: "0.7.1" + json_annotation: + dependency: "direct main" + description: + name: json_annotation + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" + url: "https://pub.dev" + source: hosted + version: "4.9.0" + json_serializable: + dependency: "direct dev" + description: + name: json_serializable + sha256: ea1432d167339ea9b5bb153f0571d0039607a873d6e04e0117af043f14a1fd4b + url: "https://pub.dev" + source: hosted + version: "6.8.0" + logging: + dependency: transitive + description: + name: logging + sha256: "623a88c9594aa774443aa3eb2d41807a48486b5613e67599fb4c41c0ad47c340" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + macros: + dependency: transitive + description: + name: macros + sha256: "12e8a9842b5a7390de7a781ec63d793527582398d16ea26c60fed58833c9ae79" + url: "https://pub.dev" + source: hosted + version: "0.1.0-main.0" + matcher: + dependency: transitive + description: + name: matcher + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb + url: "https://pub.dev" + source: hosted + version: "0.12.16+1" + meta: + dependency: "direct main" + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + mime: + dependency: transitive + description: + name: mime + sha256: "2e123074287cc9fd6c09de8336dae606d1ddb88d9ac47358826db698c176a1f2" + url: "https://pub.dev" + source: hosted + version: "1.0.5" + node_preamble: + dependency: transitive + description: + name: node_preamble + sha256: "6e7eac89047ab8a8d26cf16127b5ed26de65209847630400f9aefd7cd5c730db" + url: "https://pub.dev" + source: hosted + version: "2.0.2" + openapi_spec: + dependency: "direct dev" + description: + path: "." 
+ ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" + resolved-ref: "93230a5e346b02789f0f727da8eecea9c7bdf118" + url: "https://github.com/davidmigloz/openapi_spec.git" + source: git + version: "0.7.8" + package_config: + dependency: transitive + description: + name: package_config + sha256: "1c5b77ccc91e4823a5af61ee74e6b972db1ef98c2ff5a18d3161c982a55448bd" + url: "https://pub.dev" + source: hosted + version: "2.1.0" + path: + dependency: transitive + description: + name: path + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + pool: + dependency: transitive + description: + name: pool + sha256: "20fe868b6314b322ea036ba325e6fc0711a22948856475e2c2b6306e8ab39c2a" + url: "https://pub.dev" + source: hosted + version: "1.5.1" + pub_semver: + dependency: transitive + description: + name: pub_semver + sha256: "40d3ab1bbd474c4c2328c91e3a7df8c6dd629b79ece4c4bd04bee496a224fb0c" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + pubspec_parse: + dependency: transitive + description: + name: pubspec_parse + sha256: c799b721d79eb6ee6fa56f00c04b472dcd44a30d258fac2174a6ec57302678f8 + url: "https://pub.dev" + source: hosted + version: "1.3.0" + recase: + dependency: transitive + description: + name: recase + sha256: e4eb4ec2dcdee52dcf99cb4ceabaffc631d7424ee55e56f280bc039737f89213 + url: "https://pub.dev" + source: hosted + version: "4.1.0" + shelf: + dependency: transitive + description: + name: shelf + sha256: e7dd780a7ffb623c57850b33f43309312fc863fb6aa3d276a754bb299839ef12 + url: "https://pub.dev" + source: hosted + version: "1.4.2" + shelf_packages_handler: + dependency: transitive + description: + name: shelf_packages_handler + sha256: "89f967eca29607c933ba9571d838be31d67f53f6e4ee15147d5dc2934fee1b1e" + url: "https://pub.dev" + source: hosted + version: "3.0.2" + shelf_static: + dependency: transitive + description: + name: shelf_static + sha256: a41d3f53c4adf0f57480578c1d61d90342cd617de7fc8077b1304643c2d85c1e + url: "https://pub.dev" + source: hosted + version: "1.1.2" + shelf_web_socket: + dependency: transitive + description: + name: shelf_web_socket + sha256: "073c147238594ecd0d193f3456a5fe91c4b0abbcc68bf5cd95b36c4e194ac611" + url: "https://pub.dev" + source: hosted + version: "2.0.0" + source_gen: + dependency: transitive + description: + name: source_gen + sha256: "14658ba5f669685cd3d63701d01b31ea748310f7ab854e471962670abcf57832" + url: "https://pub.dev" + source: hosted + version: "1.5.0" + source_helper: + dependency: transitive + description: + name: source_helper + sha256: "6adebc0006c37dd63fe05bca0a929b99f06402fc95aa35bf36d67f5c06de01fd" + url: "https://pub.dev" + source: hosted + version: "1.3.4" + source_map_stack_trace: + dependency: transitive + description: + name: source_map_stack_trace + sha256: "84cf769ad83aa6bb61e0aa5a18e53aea683395f196a6f39c4c881fb90ed4f7ae" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + source_maps: + dependency: transitive + description: + name: source_maps + sha256: "708b3f6b97248e5781f493b765c3337db11c5d2c81c3094f10904bfa8004c703" + url: "https://pub.dev" + source: hosted + version: "0.10.12" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + stack_trace: + dependency: transitive + description: + name: stack_trace + sha256: 
"73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" + url: "https://pub.dev" + source: hosted + version: "1.11.1" + stream_channel: + dependency: transitive + description: + name: stream_channel + sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 + url: "https://pub.dev" + source: hosted + version: "2.1.2" + stream_transform: + dependency: transitive + description: + name: stream_transform + sha256: "14a00e794c7c11aa145a170587321aedce29769c08d7f58b1d141da75e3b1c6f" + url: "https://pub.dev" + source: hosted + version: "2.1.0" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + url: "https://pub.dev" + source: hosted + version: "1.3.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + test: + dependency: "direct dev" + description: + name: test + sha256: "713a8789d62f3233c46b4a90b174737b2c04cb6ae4500f2aa8b1be8f03f5e67f" + url: "https://pub.dev" + source: hosted + version: "1.25.8" + test_api: + dependency: transitive + description: + name: test_api + sha256: "664d3a9a64782fcdeb83ce9c6b39e78fd2971d4e37827b9b06c3aa1edc5e760c" + url: "https://pub.dev" + source: hosted + version: "0.7.3" + test_core: + dependency: transitive + description: + name: test_core + sha256: "12391302411737c176b0b5d6491f466b0dd56d4763e347b6714efbaa74d7953d" + url: "https://pub.dev" + source: hosted + version: "0.6.5" + timing: + dependency: transitive + description: + name: timing + sha256: "70a3b636575d4163c477e6de42f247a23b315ae20e86442bebe32d3cabf61c32" + url: "https://pub.dev" + source: hosted + version: "1.0.1" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" + source: hosted + version: "1.3.2" + vm_service: + dependency: transitive + description: + name: vm_service + sha256: f652077d0bdf60abe4c1f6377448e8655008eef28f128bc023f7b5e8dfeb48fc + url: "https://pub.dev" + source: hosted + version: "14.2.4" + watcher: + dependency: transitive + description: + name: watcher + sha256: "3d2ad6751b3c16cf07c7fca317a1413b3f26530319181b37e3b9039b84fc01d8" + url: "https://pub.dev" + source: hosted + version: "1.1.0" + web: + dependency: transitive + description: + name: web + sha256: d43c1d6b787bf0afad444700ae7f4db8827f701bc61c255ac8d328c6f4d52062 + url: "https://pub.dev" + source: hosted + version: "1.0.0" + web_socket: + dependency: transitive + description: + name: web_socket + sha256: "3c12d96c0c9a4eec095246debcea7b86c0324f22df69893d538fcc6f1b8cce83" + url: "https://pub.dev" + source: hosted + version: "0.1.6" + web_socket_channel: + dependency: transitive + description: + name: web_socket_channel + sha256: "9f187088ed104edd8662ca07af4b124465893caf063ba29758f97af57e61da8f" + url: "https://pub.dev" + source: hosted + version: "3.0.1" + webkit_inspection_protocol: + dependency: transitive + description: + name: webkit_inspection_protocol + sha256: "87d3f2333bb240704cd3f1c6b5b7acd8a10e7f0bc28c28dcf14e782014f4a572" + url: "https://pub.dev" + source: hosted + version: "1.2.1" + yaml: + dependency: transitive + description: + name: yaml + sha256: "75769501ea3489fca56601ff33454fe45507ea3bfb014161abc3b43ae25989d5" + url: "https://pub.dev" + source: hosted + version: "3.1.2" +sdks: + dart: ">=3.4.0 <4.0.0" diff 
--git a/packages/anthropic_sdk_dart/pubspec.yaml b/packages/anthropic_sdk_dart/pubspec.yaml new file mode 100644 index 00000000..84376d27 --- /dev/null +++ b/packages/anthropic_sdk_dart/pubspec.yaml @@ -0,0 +1,34 @@ +name: anthropic_sdk_dart +description: Dart Client for the Anthropic API (Claude 3.5 Sonnet, Opus, Haiku, etc.). +version: 0.1.0 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/anthropic_sdk_dart +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:anthropic_sdk_dart +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.dev + +topics: + - ai + - nlp + - llms + - anthropic + +environment: + sdk: ">=3.4.0 <4.0.0" + +dependencies: + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 + meta: ^1.11.0 + +dev_dependencies: + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 + # openapi_spec: ^0.7.8 + openapi_spec: + git: + url: https://github.com/davidmigloz/openapi_spec.git + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/anthropic_sdk_dart/test/messages_test.dart b/packages/anthropic_sdk_dart/test/messages_test.dart new file mode 100644 index 00000000..648ad7f4 --- /dev/null +++ b/packages/anthropic_sdk_dart/test/messages_test.dart @@ -0,0 +1,320 @@ +// ignore_for_file: avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Anthropic Messages API tests', () { + late AnthropicClient client; + + setUp(() async { + client = AnthropicClient( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call messages API', timeout: const Timeout(Duration(minutes: 5)), + () async { + const models = Models.values; + for (final model in models) { + print('Testing model: ${model.name}'); + final res = await client.createMessage( + request: CreateMessageRequest( + model: Model.model(model), + temperature: 0, + maxTokens: 1024, + system: + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces, commas or additional explanations.', + messages: const [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'List the numbers from 1 to 9 in order.', + ), + ), + ], + ), + ); + expect(res.id, isNotEmpty); + expect( + res.content.text.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + expect(res.role, MessageRole.assistant); + expect( + res.model?.replaceAll(RegExp(r'[-.]'), ''), + model.name.toLowerCase(), + ); + expect(res.stopReason, StopReason.endTurn); + expect(res.stopSequence, isNull); + expect(res.type, 'message'); + expect(res.usage?.inputTokens, greaterThan(0)); + expect(res.usage?.outputTokens, greaterThan(0)); + await Future.delayed( + const Duration(seconds: 5), + ); // To avoid rate limit + } + }); + + test('Test call messages streaming API', + timeout: const Timeout(Duration(minutes: 5)), () async { + final stream = client.createMessageStream( + request: const CreateMessageRequest( + model: Model.model(Models.claudeInstant12), + temperature: 0, + maxTokens: 1024, + system: 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces, commas or additional explanations.', + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'List the 
numbers from 1 to 9 in order.', + ), + ), + ], + ), + ); + String text = ''; + await for (final res in stream) { + res.map( + messageStart: (v) { + expect(res.type, MessageStreamEventType.messageStart); + expect(v.message.id, isNotEmpty); + expect(v.message.role, MessageRole.assistant); + expect( + v.message.model?.replaceAll(RegExp(r'[-.]'), ''), + Models.claudeInstant12.name.toLowerCase(), + ); + expect(v.message.stopReason, isNull); + expect(v.message.stopSequence, isNull); + expect(v.message.usage?.inputTokens, greaterThan(0)); + expect(v.message.usage?.outputTokens, greaterThan(0)); + }, + messageDelta: (v) { + expect(res.type, MessageStreamEventType.messageDelta); + expect(v.delta.stopReason, StopReason.endTurn); + expect(v.usage.outputTokens, greaterThan(0)); + }, + messageStop: (v) { + expect(res.type, MessageStreamEventType.messageStop); + }, + contentBlockStart: (v) { + expect(res.type, MessageStreamEventType.contentBlockStart); + expect(v.index, 0); + expect(v.contentBlock.text, isNotNull); + expect(v.contentBlock.type, 'text'); + }, + contentBlockDelta: (v) { + expect(res.type, MessageStreamEventType.contentBlockDelta); + expect(v.index, greaterThanOrEqualTo(0)); + expect(v.delta.text, isNotEmpty); + expect(v.delta.type, 'text_delta'); + text += v.delta + .mapOrNull(textDelta: (v) => v.text) + ?.replaceAll(RegExp(r'[\s\n]'), '') ?? + ''; + }, + contentBlockStop: (v) { + expect(res.type, MessageStreamEventType.contentBlockStop); + expect(v.index, greaterThanOrEqualTo(0)); + }, + ping: (PingEvent v) { + expect(res.type, MessageStreamEventType.ping); + }, + ); + } + expect(text, contains('123456789')); + }); + + test('Test response max tokens', () async { + const request = CreateMessageRequest( + model: Model.model(Models.claudeInstant12), + maxTokens: 1, + messages: [ + Message( + role: MessageRole.user, + content: MessageContent.text( + 'Tell me a joke.', + ), + ), + ], + ); + + final res = await client.createMessage(request: request); + expect(res.stopReason, StopReason.maxTokens); + }); + + const tool = Tool( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + test('Test tool use', () async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + final aiMessage1 = await client.createMessage(request: request1); + expect(aiMessage1.role, MessageRole.assistant); + + var toolUse = aiMessage1.content.blocks.first; + expect(toolUse, isA<ToolUseBlock>()); + toolUse = toolUse as ToolUseBlock; + + expect(toolUse.name, tool.name); + expect(toolUse.input, isNotEmpty); + expect(toolUse.input.containsKey('location'), isTrue); + expect(toolUse.input['location'], contains('Boston')); + + final toolResult = json.encode({ + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }); + + final request2 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now?', + ), + ), + Message( + role: MessageRole.assistant, + content: aiMessage1.content, + ), + Message( + role: MessageRole.user, + content: MessageContent.blocks([ + Block.toolResult( + toolUseId: toolUse.id, + content: ToolResultBlockContent.text(toolResult), + ), + ]), + ), + ], + tools: [tool], + maxTokens: 1024, + ); + final aiMessage2 = await client.createMessage(request: request2); + + expect(aiMessage2.role, MessageRole.assistant); + expect(aiMessage2.content.text, contains('22')); + }); + + test('Test tool use streaming', + timeout: const Timeout(Duration(minutes: 5)), () async { + final request1 = CreateMessageRequest( + model: const Model.model(Models.claude35Sonnet20240620), + messages: [ + const Message( + role: MessageRole.user, + content: MessageContent.text( + 'What’s the weather like in Boston right now in Celsius?', + ), + ), + ], + tools: [tool], + toolChoice: ToolChoice( + type: ToolChoiceType.tool, + name: tool.name, + ), + maxTokens: 1024, + ); + final stream = client.createMessageStream( + request: request1, + ); + String inputJson = ''; + await for (final res in stream) { + res.map( + messageStart: (v) { + expect(res.type, MessageStreamEventType.messageStart); + expect(v.message.id, isNotEmpty); + expect(v.message.role, MessageRole.assistant); + expect( + v.message.model?.replaceAll(RegExp(r'[-.]'), ''), + Models.claude35Sonnet20240620.name.toLowerCase(), + ); + expect(v.message.stopReason, isNull); + expect(v.message.stopSequence, isNull); + expect(v.message.usage?.inputTokens, greaterThan(0)); + expect(v.message.usage?.outputTokens, greaterThan(0)); + }, + messageDelta: (v) { + expect(res.type, MessageStreamEventType.messageDelta); + expect(v.delta.stopReason, StopReason.toolUse); + expect(v.usage.outputTokens, greaterThan(0)); + }, + messageStop: (v) { + expect(res.type, MessageStreamEventType.messageStop); + }, + contentBlockStart: (v) { + expect(res.type, MessageStreamEventType.contentBlockStart); + expect(v.index, 0); + expect(v.contentBlock.type, 'tool_use'); + expect(v.contentBlock.toolUse, isNotNull); + expect(v.contentBlock.toolUse!.id, isNotEmpty); + expect(v.contentBlock.toolUse!.name, tool.name); + }, + 
contentBlockDelta: (v) { + expect(res.type, MessageStreamEventType.contentBlockDelta); + expect(v.index, greaterThanOrEqualTo(0)); + expect(v.delta.type, 'input_json_delta'); + inputJson += v.delta.inputJson; + }, + contentBlockStop: (v) { + expect(res.type, MessageStreamEventType.contentBlockStop); + expect(v.index, greaterThanOrEqualTo(0)); + }, + ping: (PingEvent v) { + expect(res.type, MessageStreamEventType.ping); + }, + ); + } + final input = json.decode(inputJson) as Map; + expect(input['location'], contains('Boston')); + expect(input['unit'], 'celsius'); + }); + }); +} diff --git a/packages/chromadb/CHANGELOG.md b/packages/chromadb/CHANGELOG.md index 899efe6f..70f441bd 100644 --- a/packages/chromadb/CHANGELOG.md +++ b/packages/chromadb/CHANGELOG.md @@ -1,3 +1,11 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.0+1 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.2.0 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/chromadb/pubspec.yaml b/packages/chromadb/pubspec.yaml index 7a218b9c..eb6a6f29 100644 --- a/packages/chromadb/pubspec.yaml +++ b/packages/chromadb/pubspec.yaml @@ -1,10 +1,10 @@ name: chromadb description: Dart Client for the Chroma open-source embedding database API. -version: 0.2.0 +version: 0.2.0+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/chromadb issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -12,20 +12,20 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/googleai_dart/CHANGELOG.md b/packages/googleai_dart/CHANGELOG.md index 8277d0d5..7bc6e29d 100644 --- a/packages/googleai_dart/CHANGELOG.md +++ b/packages/googleai_dart/CHANGELOG.md @@ -1,3 +1,15 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.0+2 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +## 0.1.0+1 + + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + ## 0.1.0 - **REFACTOR**: Minor changes ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e)) diff --git a/packages/googleai_dart/lib/googleai_dart.dart b/packages/googleai_dart/lib/googleai_dart.dart index e673e9d9..f0807211 100644 --- a/packages/googleai_dart/lib/googleai_dart.dart +++ b/packages/googleai_dart/lib/googleai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -library googleai_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show GoogleAIClientException; diff --git a/packages/googleai_dart/lib/src/http_client/http_client.dart b/packages/googleai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/googleai_dart/lib/src/http_client/http_client.dart +++ b/packages/googleai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/googleai_dart/pubspec.yaml b/packages/googleai_dart/pubspec.yaml index ca8f0f00..ee294296 100644 --- a/packages/googleai_dart/pubspec.yaml +++ b/packages/googleai_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: googleai_dart description: Dart Client for the Google AI API (Gemini Pro, Gemini Pro Vision, embeddings, etc.). -version: 0.1.0 +version: 0.1.0+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/googleai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:googleai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,22 +14,22 @@ topics: - gemini environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/langchain/CHANGELOG.md b/packages/langchain/CHANGELOG.md index 47e5a89d..b5ee86a2 100644 --- a/packages/langchain/CHANGELOG.md +++ b/packages/langchain/CHANGELOG.md @@ -1,6 +1,43 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. 
+ +--- + +## 0.7.6 + + - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +## 0.7.5 + + - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567)) + +## 0.7.4 + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf)) + +## 0.7.3 + +> Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package. + +- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) +- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +## 0.7.2 + +> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package. + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). 
([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + ## 0.7.1 -> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. +> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is now available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. - **DOCS**: Add docs for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) - **DOCS**: Update ChatOllama docs ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e)) @@ -153,7 +190,7 @@ ## 0.0.13 -> Check out the [LangChain Expression Language documentation](https://langchaindart.com/#/expression_language/interface) for more details +> Check out the [LangChain Expression Language documentation](https://langchaindart.dev/#/expression_language/interface) for more details - **FEAT**: Add support for JsonOutputFunctionsParser ([#165](https://github.com/davidmigloz/langchain_dart/issues/165)). ([66c8e644](https://github.com/davidmigloz/langchain_dart/commit/66c8e64410d1dbf8b75e5734cb0cbb0e43dc0615)) - **FEAT**: Add support for StringOutputParser ([#164](https://github.com/davidmigloz/langchain_dart/issues/164)). ([ee29e99a](https://github.com/davidmigloz/langchain_dart/commit/ee29e99a410c3cc6a7ae263fea1cde283f904edf)) @@ -274,7 +311,7 @@ - Initial public release. Check out the announcement post for all the details: -https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 @@ -322,7 +359,7 @@ https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef - Add support for LLMs - `BaseLLM` class (#14). - Add support for Chat models - `BaseChatModel` class (#10). - Add support for prompt templates - `PromptTemplate` class (#7). -- Publish LangChain.dart documentation on http://langchaindart.com. +- Publish LangChain.dart documentation on http://langchaindart.dev. ## 0.0.1-dev.1 diff --git a/packages/langchain/README.md b/packages/langchain/README.md index bef19382..d01e9ccd 100644 --- a/packages/langchain/README.md +++ b/packages/langchain/README.md @@ -5,14 +5,15 @@ [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) +[![Code Assist AI](https://img.shields.io/badge/AI-Code%20Assist-EB9FDA)](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) Build LLM-powered Dart/Flutter applications. ## What is LangChain.dart? -LangChain.dart is a Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). +LangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118). 
-LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, extraction, etc.). +LangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, translation, extraction, recsys, etc.). The components can be grouped into a few core modules: @@ -22,7 +23,7 @@ The components can be grouped into a few core modules: - 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). - 🤖 **Agents:** "bots" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task. -The different components can be composed together using the LangChain Expression Language (LCEL). +The different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started). ## Motivation @@ -37,66 +38,130 @@ LangChain.dart aims to fill this gap by abstracting the intricacies of working w ## Packages LangChain.dart has a modular design that allows developers to import only the components they need. The ecosystem consists of several packages: -- [`langchain_core`](https://pub.dev/packages/langchain_core): contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. - > Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. -- [`langchain`](https://pub.dev/packages/langchain): contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. - > Depend on this package to build LLM applications with LangChain.dart. - > This package exposes `langchain_core` so you don't need to depend on it explicitly. -- [`langchain_community`](https://pub.dev/packages/langchain_community): contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. - > Depend on this package if you want to use any of the integrations or components it provides. -- Integration-specific packages (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), etc.): popular third-party integrations are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. - > Depend on an integration-specific package if you want to use the specific integration. + +### [`langchain_core`](https://pub.dev/packages/langchain_core) [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) + +Contains only the core abstractions as well as LangChain Expression Language as a way to compose them together. + +> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it. 
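To give a feel for how composing components with LCEL looks in practice, here is a minimal sketch (the `ChatOpenAI` setup, the `StringOutputParser<ChatResult>` type argument and the `.pipe` chaining reflect the public API as documented and are illustrative assumptions, not part of this change):

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Prompt -> chat model -> output parser, composed with LCEL.
  final prompt = ChatPromptTemplate.fromTemplates(const [
    (ChatMessageType.system, 'You are a helpful AI assistant'),
    (ChatMessageType.human, 'Tell me a joke about {topic}'),
  ]);
  final model = ChatOpenAI(apiKey: 'OPENAI_API_KEY'); // placeholder key
  const parser = StringOutputParser<ChatResult>();

  final chain = prompt.pipe(model).pipe(parser);
  final res = await chain.invoke({'topic': 'LangChain.dart'});
  print(res);
}
```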
+ +### [`langchain`](https://pub.dev/packages/langchain) [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) + +Contains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture. + +> Depend on this package to build LLM applications with LangChain.dart. +> +> This package exposes `langchain_core` so you don't need to depend on it explicitly. + +### [`langchain_community`](https://pub.dev/packages/langchain_community) [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) + +Contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. + +> Depend on this package if you want to use any of the integrations or components it provides. + +### Integration-specific packages + +Popular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package. + +> Depend on an integration-specific package if you want to use the specific integration. + +| Package | Version | Description | +|---------------------|---------------------|---------------------| +| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthropic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.) | +| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | +| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | +| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | +| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). | +| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) 
| +| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-4o, o1, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | +| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | +| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration |
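As a rough illustration of depending on a single integration package next to `langchain`, usage might look like this (a minimal sketch; the model name and option values are placeholders, not part of this change):

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_ollama/langchain_ollama.dart';

Future<void> main() async {
  // ChatOllama comes from the integration package; the core
  // abstractions (prompts, chains, agents) come from `langchain`.
  final chatModel = ChatOllama(
    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'), // placeholder model
  );
  final res = await chatModel.invoke(
    PromptValue.string('Why is the sky blue?'),
  );
  print(res.output.content);
}
```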

-| Package | Version | Description | -|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [langchain_core](https://pub.dev/packages/langchain_core) | [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core) | Core abstractions and LCEL | -| [langchain](https://pub.dev/packages/langchain) | [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain) | Higher-level and use-case specific chains, agents, and retrieval algorithms | -| [langchain_community](https://pub.dev/packages/langchain_community) | [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community) | Third-party integrations (without specific packages) and community-contributed components | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai) | OpenAI integration (GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) | -| [langchain_google](https://pub.dev/packages/langchain_google) | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google) | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.) | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase) | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.)) | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) | Ollama integration (Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma, CodeGemma, Command R, LLaVA, DBRX, Qwen 1.5, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.) | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.). 
| -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone) | Pinecone vector database integration | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma) | Chroma vector database integration | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase) | Supabase Vector database integration | - -Functionality provided by each integration package: - -| Package | LLMs | Chat models | Embeddings | Vector stores | Chains | Agents | Tools | -|---------------------------------------------------------------------|------|-------------|------------|---------------|--------|--------|-------| -| [langchain_community](https://pub.dev/packages/langchain_community) | | | | | | | | -| [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | | ✔ | ✔ | ✔ | -| [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | ✔ | | | | -| [langchain_firebase](https://pub.dev/packages/langchain_firebase) | | ✔ | | | | | | -| [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | | | | | -| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | | ✔ | ✔ | | | | | -| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | | | | ✔ | | | | -| [langchain_chroma](https://pub.dev/packages/langchain_chroma) | | | | ✔ | | | | -| [langchain_supabase](https://pub.dev/packages/langchain_supabase) | | | | ✔ | | | | +### API clients packages The following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently: -| Package | Version | Description | -|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------|-----------------------------------------------| -| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | Chroma DB API client | -| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | Google AI for Developers (Gemini API) client | -| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | Mistral AI API client | -| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | Ollama API client | -| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | OpenAI API client | -| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | GCP Vertex AI API client | +> Depend on an API client package if you just want to consume the API of a specific provider directly without using LangChain.dart abstractions. 
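For example, the `anthropic_sdk_dart` client added in this PR can be used standalone; the following sketch mirrors the calls exercised in its tests (reading the key from `ANTHROPIC_API_KEY` and the model choice are illustrative):

```dart
import 'dart:io';

import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';

Future<void> main() async {
  // Call the Anthropic Messages API directly, without LangChain.dart abstractions.
  final client = AnthropicClient(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
  );
  final res = await client.createMessage(
    request: const CreateMessageRequest(
      model: Model.model(Models.claude35Sonnet20240620),
      maxTokens: 1024,
      messages: [
        Message(
          role: MessageRole.user,
          content: MessageContent.text('Tell me a joke.'),
        ),
      ],
    ),
  );
  print(res.content.text);
  client.endSession();
}
```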
+ +| Package | Version | Description | +|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------| +| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart) | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart) | [Anthropic](https://docs.anthropic.com/en/api) API client | +| [chromadb](https://pub.dev/packages/chromadb) | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb) | [Chroma DB](https://trychroma.com/) API client | +| [googleai_dart](https://pub.dev/packages/googleai_dart) | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart) | [Google AI for Developers](https://ai.google.dev/) API client | +| [mistralai_dart](https://pub.dev/packages/mistralai_dart) | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart) | [Mistral AI](https://docs.mistral.ai/api) API client | +| [ollama_dart](https://pub.dev/packages/ollama_dart) | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart) | [Ollama](https://ollama.ai/) API client | +| [openai_dart](https://pub.dev/packages/openai_dart) | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) | [OpenAI](https://platform.openai.com/docs/api-reference) API client | +| [tavily_dart](https://pub.dev/packages/tavily_dart) | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) | [Tavily](https://tavily.com) API client | +| [vertex_ai](https://pub.dev/packages/vertex_ai) | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai) | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client | + +## Integrations + +The following integrations are available in LangChain.dart: + +### Chat Models + +| Chat model | Package | Streaming | Multi-modal | Tool-call | Description | +|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic) | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔ | ✔ | ✔ | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API) | +| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase) | ✔ | ✔ | ✔ | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API) | +| 
[ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai) | [langchain_google](https://pub.dev/packages/langchain_google) | ✔ | ✔ | ✔ | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API) | +| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔ | | | [Mistral Chat API](https://docs.mistral.ai/api) | +| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | ✔ | ✔ | [Ollama Chat API](https://ollama.ai) | +| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | ✔ | ✔ | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) | +| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | | | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai) | + +### LLMs + +_Note: Prefer using Chat Models over LLMs as many providers have deprecated them._ + +| LLM | Package | Streaming | Description | +|-----|---------|-----------|-------------| +| [Ollama](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔ | [Ollama Completions API](https://ollama.ai) | +| [OpenAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔ | [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions) | +| [VertexAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | | [GCP Vertex AI Text API](https://cloud.google.com/vertex-ai) | + +### Embedding Models + +| Embedding model | Package | Description | +|-----------------|---------|-------------| +| [GoogleGenerativeAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/google_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Google AI Embeddings API](https://ai.google.dev) | +| [MistralAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/mistralai) | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [Mistral Embeddings API](https://docs.mistral.ai) | +| 
[OllamaEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/ollama) | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | [Ollama Embeddings API](https://ollama.ai) | +| [OpenAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/openai) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings) | +| [VertexAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/gcp_vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [GCP Vertex AI Embeddings API](https://cloud.google.com/vertex-ai) | + +### Vector Stores + +| Vector store | Package | Description | +|--------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| +| [Chroma](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/chroma) | [langchain_chroma](https://pub.dev/packages/langchain_chroma) | [Chroma](https://trychroma.com/) integration | +| [MemoryVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/memory) | [langchain](https://pub.dev/packages/langchain) | In-memory vector store for prototype and testing | +| [ObjectBoxVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox) | [langchain_community](https://pub.dev/packages/langchain_community) | [ObjectBox](https://objectbox.io/) integration | +| [Pinecone](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/pinecone) | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone) | [Pinecone](https://pinecone.io/) integration | +| [Supabase](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) | [langchain_supabase](https://pub.dev/packages/langchain_supabase) | [Supabase Vector](https://supabase.com/vector) integration | +| [VertexAIMatchingEngine](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google) | [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview) (former Matching Engine) integration | + +### Tools + +| Tool | Package | Description | +|-----------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +| [CalculatorTool](https://langchaindart.dev/#/modules/agents/tools/calculator) | [langchain_community](https://pub.dev/packages/langchain_community) | To calculate math expressions | +| [OpenAIDallETool](https://langchaindart.dev/#/modules/agents/tools/openai_dall_e) | [langchain_openai](https://pub.dev/packages/langchain_openai) | [OpenAI's DALL-E Image Generator](https://platform.openai.com/docs/api-reference/images) | +| TavilyAnswerTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns an answer for a query using the [Tavily](https://tavily.com) search engine | +| TavilySearchResultsTool | [langchain_community](https://pub.dev/packages/langchain_community) | Returns a list of results for a query using the 
[Tavily](https://tavily.com) search engine | ## Getting started -To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_openai` or `langchain_google`): +To start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.): ```yaml dependencies: langchain: {version} + langchain_community: {version} langchain_openai: {version} langchain_google: {version} ... @@ -156,9 +221,10 @@ print(res); ## Documentation -- [LangChain.dart documentation](https://langchaindart.com) +- [LangChain.dart documentation](https://langchaindart.dev) +- [Code Assist AI](https://app.commanddash.io/agent?github=https://github.com/davidmigloz/langchain_dart) (Chatbot for LangChain.dart documentation) - [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples) -- [LangChain.dart blog](https://blog.langchaindart.com) +- [LangChain.dart blog](https://blog.langchaindart.dev) - [Project board](https://github.com/users/davidmigloz/projects/2/views/1) ## Community diff --git a/packages/langchain/lib/src/agents/agents.dart b/packages/langchain/lib/src/agents/agents.dart index ec89c95c..cc12a558 100644 --- a/packages/langchain/lib/src/agents/agents.dart +++ b/packages/langchain/lib/src/agents/agents.dart @@ -1,3 +1,4 @@ export 'package:langchain_core/agents.dart'; export 'executor.dart'; +export 'tools.dart'; diff --git a/packages/langchain/lib/src/agents/tools.dart b/packages/langchain/lib/src/agents/tools.dart new file mode 100644 index 00000000..02a16284 --- /dev/null +++ b/packages/langchain/lib/src/agents/tools.dart @@ -0,0 +1,304 @@ +import 'package:langchain_core/agents.dart'; +import 'package:langchain_core/chains.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/exceptions.dart'; +import 'package:langchain_core/memory.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; + +const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( + prompt: PromptTemplate( + inputVariables: {}, + template: 'You are a helpful AI assistant', + ), +); + +/// {@template tools_agent} +/// An agent powered by the tool calling API. +/// +/// Example: +/// ```dart +/// final llm = ChatOllama( +/// defaultOptions: ChatOllamaOptions( +/// model: 'llama3-groq-tool-use', +/// temperature: 0, +/// ), +/// ); +/// final tools = [CalculatorTool()]; +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final executor = AgentExecutor(agent: agent); +/// final res = await executor.run('What is 40 raised to the 0.43 power? '); +/// ``` +/// +/// You can use any chat model that supports tools, like `ChatOpenAI`, +/// `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the +/// [documentation](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) +/// for a complete list. +/// +/// You can easily add memory to the agent using the memory parameter from the +/// [ToolsAgent.fromLLMAndTools] constructor. Make sure you enable +/// [BaseChatMemory.returnMessages] on your memory, as the agent works with +/// [ChatMessage]s. The default prompt template already takes care of adding +/// the history to the prompt. 
For example: +/// ```dart +/// final memory = ConversationBufferMemory(returnMessages: true); +/// final agent = ToolsAgent.fromLLMAndTools( +/// llm: llm, +/// tools: tools, +/// memory: memory, +/// ); +/// ``` +/// +/// If you need to use your own [llmChain] make sure your prompt template +/// includes: +/// - `MessagePlaceholder(variableName: agentInputKey)`: the input to the agent. +/// - If you are using memory: +/// * `MessagesPlaceholder(variableName: '{memoryKey}')`: the history of chat +/// messages. +/// - If you are not using memory: +/// * `MessagesPlaceholder(variableName: BaseActionAgent.agentScratchpadInputKey)`: +/// the intermediary work of the agent (if you are using memory, the agent +/// uses the memory to store the intermediary work). +/// Example: +/// ```dart +/// ChatPromptTemplate.fromTemplates([ +/// (ChatMessageType.system, 'You are a helpful AI assistant'), +/// (ChatMessageType.messagesPlaceholder, 'history'), +/// (ChatMessageType.messagePlaceholder, 'input'), +/// ]); +/// ``` +/// +/// You can use [ToolsAgent.createPrompt] to build the prompt +/// template if you only need to customize the system message or add some +/// extra messages. +/// {@endtemplate} +class ToolsAgent extends BaseSingleActionAgent { + /// {@macro tools_agent} + ToolsAgent({ + required this.llmChain, + required super.tools, + }) : _parser = const ToolsAgentOutputParser(), + assert( + llmChain.memory != null || + llmChain.prompt.inputVariables + .contains(BaseActionAgent.agentScratchpadInputKey), + '`${BaseActionAgent.agentScratchpadInputKey}` should be one of the ' + 'variables in the prompt, got ${llmChain.prompt.inputVariables}', + ), + assert( + llmChain.memory == null || llmChain.memory!.returnMessages, + 'The memory must have `returnMessages` set to true', + ); + + /// Chain to use to call the LLM. + /// + /// If the chain does not have a memory, the prompt MUST include a variable + /// called [BaseActionAgent.agentScratchpadInputKey] where the agent can put + /// its intermediary work. + /// + /// If the chain has a memory, the agent will use the memory to store the + /// intermediary work. + /// + /// The memory must have [BaseChatMemory.returnMessages] set to true for + /// the agent to work properly. + final LLMChain llmChain; + + /// Parser to use to parse the output of the LLM. + final ToolsAgentOutputParser _parser; + + /// The key for the input to the agent. + static const agentInputKey = 'input'; + + @override + Set<String> get inputKeys => {agentInputKey}; + + /// Construct a [ToolsAgent] from an [llm] and [tools]. + /// + /// - [llm] - The model to use for the agent. + /// - [tools] - The tools the agent has access to. You can omit this field if + /// you have already configured the tools in the [llm]. + /// - [memory] - The memory to use for the agent. + /// - [systemChatMessage] message to use as the system message that will be + /// the first in the prompt. Default: "You are a helpful AI assistant". + /// - [extraPromptMessages] prompt messages that will be placed between the + /// system message and the input from the agent. + factory ToolsAgent.fromLLMAndTools({ + required final BaseChatModel llm, + final List<Tool>? tools, + final BaseChatMemory? memory, + final SystemChatMessagePromptTemplate systemChatMessage = + _systemChatMessagePromptTemplate, + final List<ChatMessagePromptTemplate>? extraPromptMessages, + }) { + assert( + tools != null || llm.defaultOptions.tools != null, + 'Tools must be provided or configured in the llm', + ); + assert( + tools != null || llm.defaultOptions.tools!.every((tool) => tool is Tool), + 'All elements in `tools` must be of type `Tool` or its subclasses', + ); + + final actualTools = tools ?? llm.defaultOptions.tools!.cast<Tool>(); + + return ToolsAgent( + llmChain: LLMChain( + llm: llm, + llmOptions: llm.defaultOptions.copyWith( + tools: actualTools, + ), + prompt: createPrompt( + systemChatMessage: systemChatMessage, + extraPromptMessages: extraPromptMessages, + memory: memory, + ), + memory: memory, + ), + tools: actualTools, + ); + } + + @override + Future<List<BaseAgentAction>> plan(final AgentPlanInput input) async { + final llmChainInputs = _constructLlmChainInputs( + input.intermediateSteps, + input.inputs, + ); + final ChainValues output = await llmChain.invoke(llmChainInputs); + final predictedMessage = output[LLMChain.defaultOutputKey] as AIChatMessage; + return _parser.parseChatMessage(predictedMessage); + } + + Map<String, dynamic> _constructLlmChainInputs( + final List<AgentStep> intermediateSteps, + final InputValues inputs, + ) { + final dynamic agentInput; + + // If there is a memory, we pass the last agent step as a function message. + // Otherwise, we pass the input as a human message. + if (llmChain.memory != null && intermediateSteps.isNotEmpty) { + final lastStep = intermediateSteps.last; + final functionMsg = ChatMessage.tool( + toolCallId: lastStep.action.id, + content: lastStep.observation, + ); + agentInput = functionMsg; + } else { + agentInput = switch (inputs[agentInputKey]) { + final String inputStr => ChatMessage.humanText(inputStr), + final ChatMessage inputMsg => inputMsg, + final List<ChatMessage> inputMsgs => inputMsgs, + _ => throw LangChainException( + message: 'Agent expected a String or ChatMessage as input,' + ' got ${inputs[agentInputKey]}', + ), + }; + } + + return { + ...inputs, + agentInputKey: agentInput, + if (llmChain.memory == null) + BaseActionAgent.agentScratchpadInputKey: + _constructScratchPad(intermediateSteps), + }; + } + + List<ChatMessage> _constructScratchPad( + final List<AgentStep> intermediateSteps, + ) { + return [ + ...intermediateSteps.map((final s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }).expand((final m) => m), + ]; + } + + @override + String get agentType => 'tool-agent'; + + /// Creates prompt for this agent. + /// + /// It takes care of adding the necessary placeholders to handle the + /// intermediary work of the agent or the memory. + /// + /// - [systemChatMessage] message to use as the system message that will be + /// the first in the prompt. + /// - [extraPromptMessages] prompt messages that will be placed between the + /// system message and the new human input. + /// - [memory] optional memory to use for the agent. + static BasePromptTemplate createPrompt({ + final SystemChatMessagePromptTemplate systemChatMessage = + _systemChatMessagePromptTemplate, + final List<ChatMessagePromptTemplate>? extraPromptMessages, + final BaseChatMemory? memory, + }) { + return ChatPromptTemplate.fromPromptMessages([ + systemChatMessage, + ...?extraPromptMessages, + for (final memoryKey in memory?.memoryKeys ?? {}) + MessagesPlaceholder(variableName: memoryKey), + const MessagePlaceholder(variableName: agentInputKey), + if (memory == null) + const MessagesPlaceholder( + variableName: BaseActionAgent.agentScratchpadInputKey, + ), + ]); + } +} + +/// {@template tools_agent_output_parser} +/// Parser for [ToolsAgent]. 
+/// +/// It parses the output of the LLM and returns the corresponding +/// [BaseAgentAction] to be executed. +/// {@endtemplate} +class ToolsAgentOutputParser extends BaseOutputParser<ChatResult, OutputParserOptions, List<BaseAgentAction>> { + /// {@macro tools_agent_output_parser} + const ToolsAgentOutputParser() + : super(defaultOptions: const OutputParserOptions()); + + @override + Future<List<BaseAgentAction>> invoke( + final ChatResult input, { + final OutputParserOptions? options, + }) { + return parseChatMessage(input.output); + } + + /// Parses the [message] and returns the corresponding [BaseAgentAction]. + Future<List<BaseAgentAction>> parseChatMessage( + final AIChatMessage message, + ) async { + final toolCalls = message.toolCalls; + if (toolCalls.isNotEmpty) { + return toolCalls.map((final toolCall) { + return AgentAction( + id: toolCall.id, + tool: toolCall.name, + toolInput: toolCall.arguments, + log: 'Invoking: `${toolCall.name}` ' + 'with `${toolCall.arguments}`\n' + 'Responded: ${message.content}\n', + messageLog: [message], + ); + }).toList(growable: false); + } else { + return [ + AgentFinish( + returnValues: {'output': message.content}, + log: message.content, + ), + ]; + } + } +} diff --git a/packages/langchain/lib/src/embeddings/cache.dart b/packages/langchain/lib/src/embeddings/cache.dart index 270ae124..fe0c9435 100644 --- a/packages/langchain/lib/src/embeddings/cache.dart +++ b/packages/langchain/lib/src/embeddings/cache.dart @@ -135,7 +135,7 @@ class EmbeddingsByteStoreEncoder @override String encodeKey(final String key) { final keyHash = sha1.convert(utf8.encode(key)).toString(); - return uuid.v5(Uuid.NAMESPACE_URL, keyHash); + return uuid.v5(Namespace.url.name, keyHash); } @override diff --git a/packages/langchain/lib/src/utils/utils.dart b/packages/langchain/lib/src/utils/utils.dart index d41e35b9..28748719 100644 --- a/packages/langchain/lib/src/utils/utils.dart +++ b/packages/langchain/lib/src/utils/utils.dart @@ -1,2 +1,6 @@ export 'package:langchain_core/utils.dart' - show calculateSimilarity, cosineSimilarity, getIndexesMostSimilarEmbeddings; + show + RetryOptions, + calculateSimilarity, + cosineSimilarity, + getIndexesMostSimilarEmbeddings; diff --git a/packages/langchain/lib/src/vector_stores/memory.dart b/packages/langchain/lib/src/vector_stores/memory.dart index e812d275..a439e1cf 100644 --- a/packages/langchain/lib/src/vector_stores/memory.dart +++ b/packages/langchain/lib/src/vector_stores/memory.dart @@ -14,7 +14,9 @@ import 'package:uuid/uuid.dart'; /// This is not efficient for large vector stores as it has a time complexity /// of O(vector_dimensionality * num_vectors). /// -/// For more efficient vector stores, see [VertexAIMatchingEngine](https://pub.dev/documentation/langchain_google/latest/langchain_google/VertexAIMatchingEngine-class.html). +/// This class is useful for testing and prototyping, but it is not recommended +/// for production use cases. See other vector store integrations for +/// production use cases. /// /// ### Filtering /// diff --git a/packages/langchain/pubspec.yaml b/packages/langchain/pubspec.yaml index 0c0a2a58..48657423 100644 --- a/packages/langchain/pubspec.yaml +++ b/packages/langchain/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain description: Build powerful LLM-based Dart and Flutter applications with LangChain.dart.
-version: 0.7.1 +version: 0.7.6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain issue_tracker: https://github.com/davidmigloz/langchain_dart/issues homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -13,15 +13,18 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: characters: ^1.3.0 - collection: '>=1.17.0 <1.19.0' + collection: ^1.18.0 crypto: ^3.0.3 - langchain_core: ^0.3.1 + langchain_core: 0.3.6 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 + langchain_community: ^0.3.2 + langchain_openai: ^0.7.2 + langchain_ollama: ^0.3.2 diff --git a/packages/langchain/pubspec_overrides.yaml b/packages/langchain/pubspec_overrides.yaml index 3508ed77..d9c6fc7e 100644 --- a/packages/langchain/pubspec_overrides.yaml +++ b/packages/langchain/pubspec_overrides.yaml @@ -1,4 +1,16 @@ -# melos_managed_dependency_overrides: langchain_core +# melos_managed_dependency_overrides: langchain_community,langchain_core,langchain_ollama,langchain_openai,ollama_dart,openai_dart,tavily_dart dependency_overrides: + langchain_community: + path: ../langchain_community langchain_core: path: ../langchain_core + langchain_ollama: + path: ../langchain_ollama + langchain_openai: + path: ../langchain_openai + ollama_dart: + path: ../ollama_dart + openai_dart: + path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain/test/agents/assets/state_of_the_union.txt b/packages/langchain/test/agents/assets/state_of_the_union.txt new file mode 100644 index 00000000..d50175de --- /dev/null +++ b/packages/langchain/test/agents/assets/state_of_the_union.txt @@ -0,0 +1,723 @@ +Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. + +Last year COVID-19 kept us apart. This year we are finally together again. + +Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. + +With a duty to one another to the American people to the Constitution. + +And with an unwavering resolve that freedom will always triumph over tyranny. + +Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. + +He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. + +He met the Ukrainian people. + +From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. + +Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. + +In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. + +Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. + +Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. + +Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. + +They keep moving. + +And the costs and the threats to America and the world keep rising. 
+ +That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. + +The United States is a member along with 29 other nations. + +It matters. American diplomacy matters. American resolve matters. + +Putin’s latest attack on Ukraine was premeditated and unprovoked. + +He rejected repeated efforts at diplomacy. + +He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. + +We prepared extensively and carefully. + +We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. + +I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. + +We countered Russia’s lies with truth. + +And now that he has acted the free world is holding him accountable. + +Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. + +We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. + +Together with our allies –we are right now enforcing powerful economic sanctions. + +We are cutting off Russia’s largest banks from the international financial system. + +Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. + +We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. + +Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. + +The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. + +We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. + +And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. + +The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. + +Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. + +We are giving more than $1 Billion in direct assistance to Ukraine. + +And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. + +Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. + +Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. + +For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. + +As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. + +And we remain clear-eyed. 
The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. + +Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. + +And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. + +To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. + +And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. + +Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. + +America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. + +These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. + +But I want you to know that we are going to be okay. + +When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. + +While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. + +We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. + +In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. + +This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. + +To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. + +Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. + +He will never extinguish their love of freedom. He will never weaken the resolve of the free world. + +We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. + +The pandemic has been punishing. + +And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. + +I understand. + +I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. + +That’s why one of the first things I did as President was fight to pass the American Rescue Plan. + +Because people were hurting. We needed to act, and we did. + +Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. + +It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. + +Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. + +And as my Dad used to say, it gave people a little breathing room. 
+ +And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. + +And it worked. It created jobs. Lots of jobs. + +In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year +than ever before in the history of America. + +Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. + +For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. + +But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. + +Vice President Harris and I ran for office with a new economic vision for America. + +Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up +and the middle out, not from the top down. + +Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. + +America used to have the best roads, bridges, and airports on Earth. + +Now our infrastructure is ranked 13th in the world. + +We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. + +That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. + +This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. + +We’re done talking about infrastructure weeks. + +We’re going to have an infrastructure decade. + +It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. + +As I’ve told Xi Jinping, it is never a good bet to bet against the American people. + +We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. + +And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. + +We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. + +4,000 projects have already been announced. + +And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. + +When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. + +The federal government spends about $600 Billion a year to keep the country safe and secure. + +There’s been a law on the books for almost a century +to make sure taxpayers’ dollars support American jobs and businesses. + +Every Administration says they’ll do it, but we are actually doing it. + +We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. + +But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. 
+ +That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. + +Let me give you one example of why it’s so important to pass it. + +If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. + +It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. + +This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. + +Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. + +Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. + +Smartphones. The Internet. Technology we have yet to invent. + +But that’s just the beginning. + +Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from +$20 billion to $100 billion. + +That would be one of the biggest investments in manufacturing in American history. + +And all they’re waiting for is for you to pass this bill. + +So let’s not wait any longer. Send it to my desk. I’ll sign it. + +And we will really take off. + +And Intel is not alone. + +There’s something happening in America. + +Just look around and you’ll see an amazing story. + +The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing. + +Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas. + +That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. + +GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. + +All told, we created 369,000 new manufacturing jobs in America just last year. + +Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. + +As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” + +It’s time. + +But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. + +Inflation is robbing them of the gains they might otherwise feel. + +I get it. That’s why my top priority is getting prices under control. + +Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. + +The pandemic also disrupted global supply chains. + +When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. + +Look at cars. + +Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. + +And guess what, prices of automobiles went up. + +So—we have a choice. + +One way to fight inflation is to drive down wages and make Americans poorer. + +I have a better plan to fight inflation. + +Lower your costs, not your wages. + +Make more cars and semiconductors in America. + +More infrastructure and innovation in America. + +More goods moving faster and cheaper in America. + +More jobs where you can earn a good living in America. + +And instead of relying on foreign supply chains, let’s make it in America. 
+ +Economists call it “increasing the productive capacity of our economy.” + +I call it building a better America. + +My plan to fight inflation will lower your costs and lower the deficit. + +17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: + +First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis. + +He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make. + +But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. + +Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it. + +What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. + +Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy. + +For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it. + +Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. + +Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. + +Second – cut energy costs for families an average of $500 a year by combatting climate change. + +Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. + +Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child. + +Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children. + +My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. + +My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old. + +All of these will lower costs. + +And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody. + +The one thing all Americans agree on is that the tax system is not fair. We have to fix it. + +I’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. + +Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax. + +That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. + +We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. + +That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter. + +So that’s my plan. It will grow the economy and lower costs for families. 
+ +So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. + +My plan will not only lower costs to give families a fair shot, it will lower the deficit. + +The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. + +But in my administration, the watchdogs have been welcomed back. + +We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. + +And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. + +By the end of this year, the deficit will be down to less than half what it was before I took office. + +The only president ever to cut the deficit by more than one trillion dollars in a single year. + +Lowering your costs also means demanding more competition. + +I’m a capitalist, but capitalism without competition isn’t capitalism. + +It’s exploitation—and it drives up prices. + +When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. + +We see it happening with ocean carriers moving goods in and out of America. + +During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. + +Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. + +And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. + +That ends on my watch. + +Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. + +We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. + +Let’s pass the Paycheck Fairness Act and paid leave. + +Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. + +Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. + +And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. + +When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. + +For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. + +And I know you’re tired, frustrated, and exhausted. + +But I also know this. + +Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say +we are moving forward safely, back to more normal routines. + +We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. + +Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. + +Under these new guidelines, most Americans in most of the country can now be mask free. + +And based on the projections, more of the country will reach that point across the next couple of weeks. 
+ +Thanks to the progress we have made this past year, COVID-19 need no longer control our lives. + +I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. + +We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard. + +Here are four common sense steps as we move forward safely. + +First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. + +We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. + +The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. + +We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%. + +We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month. + +And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost. + +If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. + +We’re leaving no one behind or ignoring anyone’s needs as we move forward. + +And on testing, we have made hundreds of millions of tests available for you to order for free. + +Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. + +Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. + +If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years. + +And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. + +I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. + +Third – we can end the shutdown of schools and businesses. We have the tools we need. + +It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. + +We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. + +Our schools are open. Let’s keep it that way. Our kids need to be in school. + +And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. + +We achieved this because we provided free vaccines, treatments, tests, and masks. + +Of course, continuing this costs money. + +I will soon send Congress a request. + +The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly. + +Fourth, we will continue vaccinating the world. + +We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. + +And we won’t stop. + +We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. + +Let’s use this moment to reset. 
Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. + +Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. + +We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. + +I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. + +They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. + +Officer Mora was 27 years old. + +Officer Rivera was 22. + +Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. + +I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. + +I’ve worked on these issues a long time. + +I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. + +So let’s not abandon our streets. Or choose between safety and equal justice. + +Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. + +That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. + +That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. + +We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. + +I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. + +And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. + +And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? + +Ban assault weapons and high-capacity magazines. + +Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. + +These laws don’t infringe on the Second Amendment. They save lives. + +The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. + +In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. + +We cannot let this happen. + +Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. + +Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. + +One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. 
+ +And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. + +A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. + +And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. + +We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. + +We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. + +We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. + +We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. + +We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. + +Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. + +Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. + +It’s not only the right thing to do—it’s the economically smart thing to do. + +That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. + +Let’s get it done once and for all. + +Advancing liberty and justice also requires protecting the rights of women. + +The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. + +If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. + +And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. + +As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. + +While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. + +And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. + +So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. + +First, beat the opioid epidemic. + +There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. + +Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. + +If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. + +Second, let’s take on mental health. 
Especially among our children, whose lives and education have been turned upside down. + +The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. + +I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. + +Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. + +As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. + +It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. + +And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. + +Third, support our veterans. + +Veterans are the best of us. + +I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. + +My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. + +Our troops in Iraq and Afghanistan faced many dangers. + +One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. + +When they came home, many of the world’s fittest and best trained warriors were never the same. + +Headaches. Numbness. Dizziness. + +A cancer that would put them in a flag-draped coffin. + +I know. + +One of those soldiers was my son Major Beau Biden. + +We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. + +But I’m committed to finding out everything we can. + +Committed to military families like Danielle Robinson from Ohio. + +The widow of Sergeant First Class Heath Robinson. + +He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. + +Stationed near Baghdad, just yards from burn pits the size of football fields. + +Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. + +But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. + +Danielle says Heath was a fighter to the very end. + +He didn’t know how to stop fighting, and neither did she. + +Through her pain she found purpose to demand we do better. + +Tonight, Danielle—we are. + +The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. + +And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. + +I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. + +And fourth, let’s end cancer as we know it. + +This is personal to me and Jill, to Kamala, and to so many of you. + +Cancer is the #2 cause of death in America–second only to heart disease. + +Last month, I announced our plan to supercharge +the Cancer Moonshot that President Obama asked me to lead six years ago. + +Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. 
+ +More support for patients and families. + +To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. + +It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. + +ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. + +A unity agenda for the nation. + +We can do this. + +My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. + +In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. + +We have fought for freedom, expanded liberty, defeated totalitarianism and terror. + +And built the strongest, freest, and most prosperous nation the world has ever known. + +Now is the hour. + +Our moment of responsibility. + +Our test of resolve and conscience, of history itself. + +It is in this moment that our character is formed. Our purpose is found. Our future is forged. + +Well I know this nation. + +We will meet the test. + +To protect freedom and liberty, to expand fairness and opportunity. + +We will save democracy. + +As hard as these times have been, I am more optimistic about America today than I have been my whole life. + +Because I see the future that is within our grasp. + +Because I know there is simply nothing beyond our capacity. + +We are the only nation on Earth that has always turned every crisis we have faced into an opportunity. + +The only nation that can be defined by a single word: possibilities. + +So on this night, in our 245th year as a nation, I have come to report on the State of the Union. + +And my report is this: the State of the Union is strong—because you, the American people, are strong. + +We are stronger today than we were a year ago. + +And we will be stronger a year from now than we are today. + +Now is our moment to meet and overcome the challenges of our time. + +And we will, as one people. + +One America. + +The United States of America. + +May God bless you all. May God protect our troops. 
\ No newline at end of file diff --git a/packages/langchain/test/agents/tools_agent_test.dart b/packages/langchain/test/agents/tools_agent_test.dart new file mode 100644 index 00000000..e879ba88 --- /dev/null +++ b/packages/langchain/test/agents/tools_agent_test.dart @@ -0,0 +1,226 @@ +@TestOn('vm') +@Timeout(Duration(minutes: 50)) +library; // Uses dart:io + +import 'dart:io'; + +import 'package:langchain/langchain.dart'; +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_ollama/langchain_ollama.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:meta/meta.dart'; +import 'package:test/test.dart'; + +void main() { + late BaseChatModel llm; + const defaultOllamaModel = 'llama3-groq-tool-use'; + const defaultOpenAIModel = 'gpt-4o-mini'; + + group('ChatToolsAgent using Ollama tests', + skip: Platform.environment.containsKey('CI'), () { + setUp(() async { + llm = ChatOllama( + defaultOptions: ChatOllamaOptions( + model: defaultOllamaModel, + temperature: 0, + tools: [CalculatorTool(), searchTool], + keepAlive: 1, + ), + ); + }); + + test('Test ChatToolsAgent with calculator tool', () async { + await testAgentWithCalculator(llm); + }); + + test('Test ToolsAgent with messages memory', () async { + await testMemory(llm, returnMessages: true); + }); + + test('Test ToolsAgent with string memory throws error', () async { + expect( + () async => testMemory(llm, returnMessages: false), + throwsA(isA()), + ); + }); + + test('Test ToolsAgent LCEL equivalent using Ollama', () async { + final res = + await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + expect(res['output'], contains('4.88')); + }); + }); + + group('ChatToolsAgent using OpenAi tests', () { + setUp(() async { + final openaiApiKey = Platform.environment['OPENAI_API_KEY']; + llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: defaultOpenAIModel, + tools: [CalculatorTool(), searchTool], + ), + ); + }); + + test('Test ChatToolsAgent with calculator tool', () async { + await testAgentWithCalculator(llm); + }); + + test('Test ToolsAgent with messages memory', () async { + await testMemory(llm, returnMessages: true); + }); + + test('Test ToolsAgent with string memory throws error', () async { + expect( + () async => testMemory(llm, returnMessages: false), + throwsA(isA()), + ); + }); + + test('Test ToolsAgent LCEL equivalent using OpenAi', () async { + final res = + await testLCDLEquivalent(llm: llm, tool: CalculatorTool()).invoke({ + 'input': 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + }); + expect(res['output'], contains('4.88')); + }); + }); +} + +Future testAgentWithCalculator( + BaseChatModel llm, +) async { + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + ); + final executor = AgentExecutor(agent: agent); + final res = await executor.run( + 'What is 40 raised to the power of 0.43? ' + 'Return the result with 3 decimals.', + ); + expect(res, contains('4.885')); +} + +Future testMemory( + BaseChatModel llm, { + required final bool returnMessages, +}) async { + final memory = ConversationBufferMemory(returnMessages: returnMessages); + final agent = ToolsAgent.fromLLMAndTools( + llm: llm, + memory: memory, + ); + + final executor = AgentExecutor(agent: agent); + + final res1 = await executor.run( + 'Search for cat names. 
Return only 3 results.', + ); + + expect(res1, contains('AAA')); + expect(res1, contains('BBB')); + expect(res1, contains('CCC')); + expect(res1, isNot(contains('DDD'))); + + final res2 = await executor.run( + 'How many results did the search return? Respond with a number.', + ); + expect(res2, contains('3')); + expect(res2, isNot(contains('1'))); + expect(res2, isNot(contains('2'))); + expect(res2, isNot(contains('4'))); + + final res3 = await executor.run('What was the last result?'); + expect(res3, contains('CCC')); +} + +AgentExecutor testLCDLEquivalent({ + required BaseChatModel llm, + required Tool tool, +}) { + final prompt = ChatPromptTemplate.fromTemplates(const [ + (ChatMessageType.system, 'You are a helpful assistant'), + (ChatMessageType.human, '{input}'), + (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'), + ]); + + final agent = Agent.fromRunnable( + Runnable.mapInput( + (AgentPlanInput planInput) => { + 'input': planInput.inputs['input'], + 'agent_scratchpad': planInput.intermediateSteps + .map((s) { + return s.action.messageLog + + [ + ChatMessage.tool( + toolCallId: s.action.id, + content: s.observation, + ), + ]; + }) + .expand((m) => m) + .toList(growable: false), + }, + ).pipe(prompt).pipe(llm).pipe(const ToolsAgentOutputParser()), + tools: [tool], + ); + + return AgentExecutor(agent: agent); +} + +@immutable +class _SearchInput { + const _SearchInput({ + required this.query, + required this.n, + }); + + final String query; + final int n; + + _SearchInput.fromJson(final Map json) + : this( + query: json['query'] as String, + n: json['n'] as int, + ); + + @override + bool operator ==(covariant _SearchInput other) => + identical(this, other) || query == other.query && n == other.n; + + @override + int get hashCode => query.hashCode ^ n.hashCode; +} + +final searchTool = Tool.fromFunction<_SearchInput, String>( + name: 'search', + description: 'Tool for searching the web.', + inputJsonSchema: const { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The query to search for', + }, + 'n': { + 'type': 'integer', + 'description': 'The number of results to return', + }, + }, + 'required': ['query'], + }, + func: (final _SearchInput toolInput) async { + final n = toolInput.n; + final res = List.generate( + n, + (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}', + ); + return 'Results:\n${res.join('\n')}'; + }, + getInputFromJson: _SearchInput.fromJson, +); diff --git a/packages/langchain_amazon/pubspec.yaml b/packages/langchain_amazon/pubspec.yaml index 41af11b0..d948eb8c 100644 --- a/packages/langchain_amazon/pubspec.yaml +++ b/packages/langchain_amazon/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_amazon issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_amazon homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_anthropic/CHANGELOG.md b/packages/langchain_anthropic/CHANGELOG.md index 6df81faa..167d8c93 100644 --- a/packages/langchain_anthropic/CHANGELOG.md +++ b/packages/langchain_anthropic/CHANGELOG.md @@ -1,3 +1,25 @@ +📣 Check out the [releases 
page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.1+2 + + - Update a dependency to the latest release. + +## 0.1.1+1 + + - Update a dependency to the latest release. + +## 0.1.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.1.0 + +- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + ## 0.0.1-dev.1 - Bootstrap project. diff --git a/packages/langchain_anthropic/README.md b/packages/langchain_anthropic/README.md index 2d9f50a0..85d07866 100644 --- a/packages/langchain_anthropic/README.md +++ b/packages/langchain_anthropic/README.md @@ -1,6 +1,17 @@ -# 🦜️🔗 LangChain.dart +# 🦜️🔗 LangChain.dart / Anthropic -Anthropic module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) +[![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +[Anthropic](https://anthropic.com) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). + +## Features + +- Chat models: + * `ChatAnthropic`: wrapper around [Anthropic Messages](https://docs.anthropic.com/en/api/messages) API (Claude). 
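For orientation, here is a minimal, self-contained sketch of how the new `ChatAnthropic` wrapper might be used end to end. It is not part of the diff itself; it only reuses the `ChatAnthropic`, `ChatAnthropicOptions`, `PromptValue`, and `ChatResult` APIs introduced later in this PR, and the model ID shown is the package's default.

```dart
// Minimal usage sketch (not part of the diff), assuming the API surface added
// in packages/langchain_anthropic below and an ANTHROPIC_API_KEY env variable.
import 'dart:io';

import 'package:langchain_anthropic/langchain_anthropic.dart';
import 'package:langchain_core/prompts.dart';

Future<void> main() async {
  final chatModel = ChatAnthropic(
    apiKey: Platform.environment['ANTHROPIC_API_KEY'],
    defaultOptions: const ChatAnthropicOptions(
      model: 'claude-3-5-sonnet-20240620', // default model of the package
      temperature: 0,
    ),
  );
  // Invoke the model with a simple string prompt.
  final res = await chatModel.invoke(
    PromptValue.string('Give me a one-line description of LangChain.dart.'),
  );
  print(res.output.content);
  // Release the underlying HTTP client when done.
  chatModel.close();
}
```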
## License diff --git a/packages/langchain_anthropic/example/langchain_anthropic_example.dart b/packages/langchain_anthropic/example/langchain_anthropic_example.dart index 21f3e9f2..fabef4bd 100644 --- a/packages/langchain_anthropic/example/langchain_anthropic_example.dart +++ b/packages/langchain_anthropic/example/langchain_anthropic_example.dart @@ -1,3 +1,41 @@ -void main() { - // TODO +// ignore_for_file: avoid_print, unused_element +import 'dart:io'; + +import 'package:langchain_anthropic/langchain_anthropic.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; + +/// Check the docs for more examples: +/// https://langchaindart.dev +void main() async { + // Uncomment the example you want to run: + await _example1(); + // await _example2(); } + +/// The most basic example of LangChain is calling a model on some input +Future<void> _example1() async { + final anthropicApiKey = Platform.environment['ANTHROPIC_API_KEY']; + final llm = ChatAnthropic( + apiKey: anthropicApiKey, + defaultOptions: const ChatAnthropicOptions(temperature: 1), + ); + final ChatResult res = await llm.invoke( + PromptValue.string('Tell me a joke'), + ); + print(res); +} + +/// Instead of waiting for the full response from the model, you can stream it +/// while it's being generated +Future<void> _example2() async { + final anthropicApiKey = Platform.environment['ANTHROPIC_API_KEY']; + final llm = ChatAnthropic( + apiKey: anthropicApiKey, + defaultOptions: const ChatAnthropicOptions(temperature: 1), + ); + final Stream<ChatResult> stream = llm.stream( + PromptValue.string('Tell me a joke'), + ); + await stream.forEach((final chunk) => stdout.write(chunk.output.content)); } diff --git a/packages/langchain_anthropic/lib/langchain_anthropic.dart b/packages/langchain_anthropic/lib/langchain_anthropic.dart index d8becc4d..78ee6803 100644 --- a/packages/langchain_anthropic/lib/langchain_anthropic.dart +++ b/packages/langchain_anthropic/lib/langchain_anthropic.dart @@ -1,2 +1,4 @@ /// Anthropic module for LangChain.dart. library; + +export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart new file mode 100644 index 00000000..1c8360d4 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart @@ -0,0 +1,243 @@ +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; +import 'package:http/http.dart' as http; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_tiktoken/langchain_tiktoken.dart'; + +import 'mappers.dart'; +import 'types.dart'; + +/// Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) +/// (aka Claude API). +/// +/// Example: +/// ```dart +/// final chatModel = ChatAnthropic(apiKey: '...'); +/// final messages = [ +/// ChatMessage.system('You are a helpful assistant that translates English to French.'), +/// ChatMessage.humanText('I love programming.'), +/// ]; +/// final prompt = PromptValue.chat(messages); +/// final res = await chatModel.invoke(prompt); +/// ``` +/// +/// - Docs: https://docs.anthropic.com +/// +/// ### Authentication +/// +/// The Anthropic API uses API keys for authentication. Visit your +/// [API Keys](https://console.anthropic.com/settings/keys) page to retrieve +/// the API key you'll use in your requests.
+/// +/// ### Available models +/// +/// The following models are available: +/// - `claude-3-5-sonnet-20240620` +/// - `claude-3-haiku-20240307` +/// - `claude-3-opus-20240229` +/// - `claude-3-sonnet-20240229` +/// - `claude-2.0` +/// - `claude-2.1` +/// +/// Mind that the list may not be up-to-date. +/// See https://docs.anthropic.com/en/docs/about-claude/models for the updated list. +/// +/// ### Call options +/// +/// You can configure the parameters that will be used when calling the +/// Anthropic Messages API in several ways: +/// +/// **Default options:** +/// +/// Use the [defaultOptions] parameter to set the default options. These +/// options will be used unless you override them when generating completions. +/// +/// ```dart +/// final chatModel = ChatAnthropic( +/// apiKey: anthropicApiKey, +/// defaultOptions: const ChatAnthropicOptions( +/// temperature: 0.9, +/// maxTokens: 100, +/// ), +/// ); +/// ``` +/// +/// **Call options:** +/// +/// You can override the default options when invoking the model: +/// +/// ```dart +/// final res = await chatModel.invoke( +/// prompt, +/// options: const ChatAnthropicOptions(temperature: 0.5), +/// ); +/// ``` +/// +/// **Bind:** +/// +/// You can also change the options in a [Runnable] pipeline using the bind +/// method. +/// +/// In this example, we are using two totally different models for each +/// question: +/// +/// ```dart +/// final chatModel = ChatAnthropic(apiKey: anthropicApiKey); +/// const outputParser = StringOutputParser(); +/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); +/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); +/// final chain = Runnable.fromMap({ +/// 'q1': prompt1 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-5-sonnet-20240620')) | outputParser, +/// 'q2': prompt2 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-3-sonnet-20240229')) | outputParser, +/// }); +/// final res = await chain.invoke({'name': 'David'}); +/// ``` +/// +/// ### Advanced +/// +/// #### Custom HTTP client +/// +/// You can always provide your own implementation of `http.Client` for further +/// customization: +/// +/// ```dart +/// final client = ChatAnthropic( +/// apiKey: 'ANTHROPIC_API_KEY', +/// client: MyHttpClient(), +/// ); +/// ``` +/// +/// #### Using a proxy +/// +/// ##### HTTP proxy +/// +/// You can use your own HTTP proxy by overriding the `baseUrl` and providing +/// your required `headers`: +/// +/// ```dart +/// final client = ChatAnthropic( +/// baseUrl: 'https://my-proxy.com', +/// headers: {'x-my-proxy-header': 'value'}, +/// ); +/// ``` +/// +/// If you need further customization, you can always provide your own +/// `http.Client`. +/// +/// ##### SOCKS5 proxy +/// +/// To use a SOCKS5 proxy, you can use the +/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a +/// custom `http.Client`. +class ChatAnthropic extends BaseChatModel<ChatAnthropicOptions> { + /// Create a new [ChatAnthropic] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Anthropic API key. You can find your API key in the + /// [Anthropic dashboard](https://console.anthropic.com/settings/keys). + /// - [ChatAnthropic.encoding] + /// - [ChatAnthropic.defaultOptions] + /// + /// Advanced configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Anthropic's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request.
You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a SOCKS5 proxy). + ChatAnthropic({ + final String? apiKey, + final String baseUrl = 'https://api.anthropic.com/v1', + final Map<String, String>? headers, + final Map<String, dynamic>? queryParams, + final http.Client? client, + super.defaultOptions = const ChatAnthropicOptions( + model: defaultModel, + maxTokens: defaultMaxTokens, + ), + this.encoding = 'cl100k_base', + }) : _client = a.AnthropicClient( + apiKey: apiKey ?? '', + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ); + + /// A client for interacting with Anthropic API. + final a.AnthropicClient _client; + + /// The encoding to use by tiktoken when [tokenize] is called. + /// + /// Anthropic does not provide any API to count tokens, so we use tiktoken + /// to get an estimation of the number of tokens in a prompt. + String encoding; + + @override + String get modelType => 'anthropic-chat'; + + /// The default model to use unless another is specified. + static const defaultModel = 'claude-3-5-sonnet-20240620'; + + /// The default max tokens to use unless another is specified. + static const defaultMaxTokens = 1024; + + @override + Future<ChatResult> invoke( + final PromptValue input, { + final ChatAnthropicOptions? options, + }) async { + final completion = await _client.createMessage( + request: createMessageRequest( + input.toChatMessages(), + options: options, + defaultOptions: defaultOptions, + ), + ); + return completion.toChatResult(); + } + + @override + Stream<ChatResult> stream( + final PromptValue input, { + final ChatAnthropicOptions? options, + }) { + return _client + .createMessageStream( + request: createMessageRequest( + input.toChatMessages(), + options: options, + defaultOptions: defaultOptions, + stream: true, + ), + ) + .transform(MessageStreamEventTransformer()); + } + + /// Tokenizes the given prompt using tiktoken. + /// + /// Currently Anthropic does not provide a tokenizer for the models it supports. + /// So we use tiktoken and [encoding] model to get an approximation + /// for counting tokens. Mind that the actual tokens will be totally + /// different from the ones used by the Anthropic model. + /// + /// If an encoding model is specified in [encoding] field, that + /// encoding is used instead. + /// + /// - [promptValue] The prompt to tokenize. + @override + Future<List<int>> tokenize( + final PromptValue promptValue, { + final ChatAnthropicOptions?
options, + }) async { + final encoding = getEncoding(this.encoding); + return encoding.encode(promptValue.toString()); + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart new file mode 100644 index 00000000..1a011d3c --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/chat_models.dart @@ -0,0 +1,2 @@ +export 'chat_anthropic.dart'; +export 'types.dart'; diff --git a/packages/langchain_anthropic/lib/src/chat_models/mappers.dart b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart new file mode 100644 index 00000000..020ef844 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/mappers.dart @@ -0,0 +1,433 @@ +// ignore_for_file: public_member_api_docs +import 'dart:async'; +import 'dart:convert'; + +import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a; +import 'package:collection/collection.dart' show IterableExtension; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:rxdart/rxdart.dart' show WhereNotNullExtension; + +import 'chat_anthropic.dart'; +import 'types.dart'; + +/// Creates a [CreateMessageRequest] from the given input. +a.CreateMessageRequest createMessageRequest( + final List messages, { + required final ChatAnthropicOptions? options, + required final ChatAnthropicOptions defaultOptions, + final bool stream = false, +}) { + final systemMsg = messages.firstOrNull is SystemChatMessage + ? messages.firstOrNull?.contentAsString + : null; + + final messagesDtos = messages.toMessages(); + final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; + final toolChoiceDto = toolChoice?.toToolChoice(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toTool(toolChoice); + + return a.CreateMessageRequest( + model: a.Model.modelId( + options?.model ?? defaultOptions.model ?? ChatAnthropic.defaultModel, + ), + messages: messagesDtos, + maxTokens: options?.maxTokens ?? + defaultOptions.maxTokens ?? + ChatAnthropic.defaultMaxTokens, + stopSequences: options?.stopSequences ?? defaultOptions.stopSequences, + system: systemMsg, + temperature: options?.temperature ?? defaultOptions.temperature, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + metadata: a.CreateMessageRequestMetadata( + userId: options?.userId ?? 
defaultOptions.userId, + ), + tools: toolsDtos, + toolChoice: toolChoiceDto, + stream: stream, + ); +} + +extension ChatMessageListMapper on List { + List toMessages() { + final List result = []; + final List consecutiveToolMessages = []; + + void flushToolMessages() { + if (consecutiveToolMessages.isNotEmpty) { + result.add(_mapToolChatMessages(consecutiveToolMessages)); + consecutiveToolMessages.clear(); + } + } + + for (final message in this) { + switch (message) { + case SystemChatMessage(): + flushToolMessages(); + continue; // System message set in request params + case final HumanChatMessage msg: + flushToolMessages(); + final res = _mapHumanChatMessage(msg); + result.add(res); + case final AIChatMessage msg: + flushToolMessages(); + final res = _mapAIChatMessage(msg); + result.add(res); + case final ToolChatMessage msg: + consecutiveToolMessages.add(msg); + case CustomChatMessage(): + throw UnsupportedError('Anthropic does not support custom messages'); + } + } + + flushToolMessages(); // Flush any remaining tool messages + return result; + } + + a.Message _mapHumanChatMessage(final HumanChatMessage msg) { + return a.Message( + role: a.MessageRole.user, + content: switch (msg.content) { + final ChatMessageContentText t => a.MessageContent.text(t.text), + final ChatMessageContentImage i => a.MessageContent.blocks([ + _mapHumanChatMessageContentImage(i), + ]), + final ChatMessageContentMultiModal mm => a.MessageContent.blocks( + mm.parts + .map( + (final part) => switch (part) { + final ChatMessageContentText t => + a.Block.text(text: t.text), + final ChatMessageContentImage i => + _mapHumanChatMessageContentImage(i), + ChatMessageContentMultiModal() => throw ArgumentError( + 'Cannot have multimodal content in multimodal content', + ), + }, + ) + .toList(growable: false), + ), + }, + ); + } + + a.Block _mapHumanChatMessageContentImage(ChatMessageContentImage i) { + return a.Block.image( + source: a.ImageBlockSource( + type: a.ImageBlockSourceType.base64, + mediaType: switch (i.mimeType) { + 'image/jpeg' => a.ImageBlockSourceMediaType.imageJpeg, + 'image/png' => a.ImageBlockSourceMediaType.imagePng, + 'image/gif' => a.ImageBlockSourceMediaType.imageGif, + 'image/webp' => a.ImageBlockSourceMediaType.imageWebp, + _ => + throw AssertionError('Unsupported image MIME type: ${i.mimeType}'), + }, + data: i.data.startsWith('http') + ? throw AssertionError( + 'Anthropic only supports base64-encoded images', + ) + : i.data, + ), + ); + } + + a.Message _mapAIChatMessage(final AIChatMessage msg) { + if (msg.toolCalls.isEmpty) { + return a.Message( + role: a.MessageRole.assistant, + content: a.MessageContent.text(msg.content), + ); + } else { + return a.Message( + role: a.MessageRole.assistant, + content: a.MessageContent.blocks( + msg.toolCalls + .map( + (final toolCall) => a.Block.toolUse( + id: toolCall.id, + name: toolCall.name, + input: toolCall.arguments, + ), + ) + .toList(growable: false), + ), + ); + } + } + + a.Message _mapToolChatMessages(final List msgs) { + return a.Message( + role: a.MessageRole.user, + content: a.MessageContent.blocks( + msgs + .map( + (msg) => a.Block.toolResult( + toolUseId: msg.toolCallId, + content: a.ToolResultBlockContent.text(msg.content), + ), + ) + .toList(growable: false), + ), + ); + } +} + +extension MessageMapper on a.Message { + ChatResult toChatResult() { + final (content, toolCalls) = _mapMessageContent(this.content); + return ChatResult( + id: id ?? 
'', + output: AIChatMessage( + content: content, + toolCalls: toolCalls, + ), + finishReason: _mapFinishReason(stopReason), + metadata: { + 'model': model, + 'stop_sequence': stopSequence, + }, + usage: _mapUsage(usage), + ); + } +} + +class MessageStreamEventTransformer + extends StreamTransformerBase { + MessageStreamEventTransformer(); + + String? lastMessageId; + String? lastToolCallId; + + @override + Stream bind(final Stream stream) { + return stream + .map( + (event) => switch (event) { + final a.MessageStartEvent e => _mapMessageStartEvent(e), + final a.MessageDeltaEvent e => _mapMessageDeltaEvent(e), + final a.ContentBlockStartEvent e => _mapContentBlockStartEvent(e), + final a.ContentBlockDeltaEvent e => _mapContentBlockDeltaEvent(e), + final a.ContentBlockStopEvent e => _mapContentBlockStopEvent(e), + final a.MessageStopEvent e => _mapMessageStopEvent(e), + a.PingEvent() => null, + }, + ) + .whereNotNull(); + } + + ChatResult _mapMessageStartEvent(final a.MessageStartEvent e) { + final msg = e.message; + + final msgId = msg.id ?? lastMessageId ?? ''; + lastMessageId = msgId; + final (content, toolCalls) = _mapMessageContent(e.message.content); + + return ChatResult( + id: msgId, + output: AIChatMessage( + content: content, + toolCalls: toolCalls, + ), + finishReason: _mapFinishReason(e.message.stopReason), + metadata: { + if (e.message.model != null) 'model': e.message.model, + if (e.message.stopSequence != null) + 'stop_sequence': e.message.stopSequence, + }, + usage: _mapUsage(e.message.usage), + streaming: true, + ); + } + + ChatResult _mapMessageDeltaEvent(final a.MessageDeltaEvent e) { + return ChatResult( + id: lastMessageId ?? '', + output: const AIChatMessage(content: ''), + finishReason: _mapFinishReason(e.delta.stopReason), + metadata: { + if (e.delta.stopSequence != null) 'stop_sequence': e.delta.stopSequence, + }, + usage: _mapMessageDeltaUsage(e.usage), + streaming: true, + ); + } + + ChatResult _mapContentBlockStartEvent(final a.ContentBlockStartEvent e) { + final (content, toolCall) = _mapContentBlock(e.contentBlock); + if (toolCall != null) { + lastToolCallId = toolCall.id; + } + + return ChatResult( + id: lastMessageId ?? '', + output: AIChatMessage( + content: content, + toolCalls: [if (toolCall != null) toolCall], + ), + finishReason: FinishReason.unspecified, + metadata: const {}, + usage: const LanguageModelUsage(), + streaming: true, + ); + } + + ChatResult _mapContentBlockDeltaEvent(final a.ContentBlockDeltaEvent e) { + final (content, toolCals) = _mapContentBlockDelta(lastToolCallId, e.delta); + return ChatResult( + id: lastMessageId ?? '', + output: AIChatMessage( + content: content, + toolCalls: toolCals, + ), + finishReason: FinishReason.unspecified, + metadata: { + 'index': e.index, + }, + usage: const LanguageModelUsage(), + streaming: true, + ); + } + + ChatResult? _mapContentBlockStopEvent(final a.ContentBlockStopEvent e) { + lastToolCallId = null; + return null; + } + + ChatResult? _mapMessageStopEvent(final a.MessageStopEvent e) { + lastMessageId = null; + return null; + } +} + +(String content, List toolCalls) _mapMessageContent( + final a.MessageContent content, +) => + switch (content) { + final a.MessageContentText t => ( + t.value, + const [] + ), + final a.MessageContentBlocks b => ( + b.text, + b.value + .whereType() + .map( + (toolUse) => AIChatMessageToolCall( + id: toolUse.id, + name: toolUse.name, + argumentsRaw: toolUse.input.isNotEmpty + ? 
json.encode(toolUse.input) + : '', + arguments: toolUse.input, + ), + ) + .toList(growable: false), + ), + }; + +(String content, AIChatMessageToolCall? toolCall) _mapContentBlock( + final a.Block contentBlock, +) => + switch (contentBlock) { + final a.TextBlock t => (t.text, null), + final a.ImageBlock i => (i.source.data, null), + final a.ToolUseBlock tu => ( + '', + AIChatMessageToolCall( + id: tu.id, + name: tu.name, + argumentsRaw: tu.input.isNotEmpty ? json.encode(tu.input) : '', + arguments: tu.input, + ), + ), + final a.ToolResultBlock tr => (tr.content.text, null), + }; + +(String content, List toolCalls) _mapContentBlockDelta( + final String? lastToolId, + final a.BlockDelta blockDelta, +) => + switch (blockDelta) { + final a.TextBlockDelta t => (t.text, const []), + final a.InputJsonBlockDelta jb => ( + '', + [ + AIChatMessageToolCall( + id: lastToolId ?? '', + name: '', + argumentsRaw: jb.partialJson ?? '', + arguments: const {}, + ), + ], + ), + }; + +extension ToolSpecListMapper on List { + List toTool(final ChatToolChoice? toolChoice) { + if (toolChoice is ChatToolChoiceNone) { + return const []; + } + + if (toolChoice is ChatToolChoiceForced) { + final tool = firstWhereOrNull((final t) => t.name == toolChoice.name); + return [if (tool != null) _mapTool(tool)]; + } + + return map(_mapTool).toList(growable: false); + } + + a.Tool _mapTool(final ToolSpec tool) { + return a.Tool( + name: tool.name, + description: tool.description, + inputSchema: tool.inputJsonSchema, + ); + } +} + +extension ChatToolChoiceMapper on ChatToolChoice { + a.ToolChoice toToolChoice() { + return switch (this) { + ChatToolChoiceNone _ => const a.ToolChoice(type: a.ToolChoiceType.auto), + ChatToolChoiceAuto _ => const a.ToolChoice(type: a.ToolChoiceType.auto), + ChatToolChoiceRequired _ => + const a.ToolChoice(type: a.ToolChoiceType.any), + final ChatToolChoiceForced t => a.ToolChoice( + type: a.ToolChoiceType.tool, + name: t.name, + ), + }; + } +} + +FinishReason _mapFinishReason( + final a.StopReason? reason, +) => + switch (reason) { + a.StopReason.endTurn => FinishReason.stop, + a.StopReason.maxTokens => FinishReason.length, + a.StopReason.stopSequence => FinishReason.stop, + a.StopReason.toolUse => FinishReason.toolCalls, + null => FinishReason.unspecified, + }; + +LanguageModelUsage _mapUsage(final a.Usage? usage) { + return LanguageModelUsage( + promptTokens: usage?.inputTokens, + responseTokens: usage?.outputTokens, + totalTokens: usage?.inputTokens != null && usage?.outputTokens != null + ? usage!.inputTokens + usage.outputTokens + : null, + ); +} + +LanguageModelUsage _mapMessageDeltaUsage(final a.MessageDeltaUsage? usage) { + return LanguageModelUsage( + responseTokens: usage?.outputTokens, + totalTokens: usage?.outputTokens, + ); +} diff --git a/packages/langchain_anthropic/lib/src/chat_models/types.dart b/packages/langchain_anthropic/lib/src/chat_models/types.dart new file mode 100644 index 00000000..f91abdb3 --- /dev/null +++ b/packages/langchain_anthropic/lib/src/chat_models/types.dart @@ -0,0 +1,160 @@ +import 'package:collection/collection.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; + +/// {@template chat_anthropic_options} +/// Options to pass into the Anthropic Chat Model. 
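+///
+/// For illustration, a minimal options instance (the values shown are
+/// examples, not required defaults):
+/// ```dart
+/// const options = ChatAnthropicOptions(
+///   model: 'claude-3-5-sonnet-20240620',
+///   maxTokens: 1024,
+///   temperature: 0,
+/// );
+/// ```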
+/// +/// Available models: +/// - `claude-3-5-sonnet-20240620` +/// - `claude-3-haiku-20240307` +/// - `claude-3-opus-20240229` +/// - `claude-3-sonnet-20240229` +/// - `claude-2.0` +/// - `claude-2.1` +/// +/// Mind that the list may be outdated. +/// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list. +/// {@endtemplate} +@immutable +class ChatAnthropicOptions extends ChatModelOptions { + /// {@macro chat_anthropic_options} + const ChatAnthropicOptions({ + super.model, + this.maxTokens, + this.stopSequences, + this.temperature, + this.topK, + this.topP, + this.userId, + super.tools, + super.toolChoice, + super.concurrencyLimit, + }); + + /// The maximum number of tokens to generate before stopping. + /// + /// Note that our models may stop _before_ reaching this maximum. This parameter + /// only specifies the absolute maximum number of tokens to generate. + /// + /// Different models have different maximum values for this parameter. See + /// [models](https://docs.anthropic.com/en/docs/models-overview) for details. + final int? maxTokens; + + /// Custom text sequences that will cause the model to stop generating. + /// + /// Anthropic models will normally stop when they have naturally completed + /// their turn. If you want the model to stop generating when it encounters + /// custom strings of text, you can use the `stopSequences` parameter. + final List? stopSequences; + + /// Amount of randomness injected into the response. + /// + /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + /// for analytical / multiple choice, and closer to `1.0` for creative and + /// generative tasks. + /// + /// Note that even with `temperature` of `0.0`, the results will not be fully + /// deterministic. + final double? temperature; + + /// Only sample from the top K options for each subsequent token. + /// + /// Used to remove "long tail" low probability responses. + /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + final int? topK; + + /// Use nucleus sampling. + /// + /// In nucleus sampling, we compute the cumulative distribution over all the options + /// for each subsequent token in decreasing probability order and cut it off once it + /// reaches a particular probability specified by `top_p`. You should either alter + /// `temperature` or `top_p`, but not both. + /// + /// Recommended for advanced use cases only. You usually only need to use + /// `temperature`. + final double? topP; + + /// An external identifier for the user who is associated with the request. + /// + /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use + /// this id to help detect abuse. Do not include any identifying information such as + /// name, email address, or phone number. + final String? userId; + + @override + ChatAnthropicOptions copyWith({ + final String? model, + final int? maxTokens, + final List? stopSequences, + final double? temperature, + final int? topK, + final double? topP, + final String? userId, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }) { + return ChatAnthropicOptions( + model: model ?? this.model, + maxTokens: maxTokens ?? this.maxTokens, + stopSequences: stopSequences ?? this.stopSequences, + temperature: temperature ?? this.temperature, + topK: topK ?? this.topK, + topP: topP ?? 
this.topP, + userId: userId ?? this.userId, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + ChatAnthropicOptions merge(covariant final ChatAnthropicOptions? other) { + return copyWith( + model: other?.model, + maxTokens: other?.maxTokens, + stopSequences: other?.stopSequences, + temperature: other?.temperature, + topK: other?.topK, + topP: other?.topP, + userId: other?.userId, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final ChatAnthropicOptions other) { + return model == other.model && + maxTokens == other.maxTokens && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + temperature == other.temperature && + topK == other.topK && + topP == other.topP && + userId == other.userId && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + maxTokens.hashCode ^ + const ListEquality().hash(stopSequences) ^ + temperature.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + userId.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } +} diff --git a/packages/langchain_anthropic/pubspec.yaml b/packages/langchain_anthropic/pubspec.yaml index de768b22..7b23e44a 100644 --- a/packages/langchain_anthropic/pubspec.yaml +++ b/packages/langchain_anthropic/pubspec.yaml @@ -1,11 +1,10 @@ name: langchain_anthropic -description: Anthropic module for LangChain.dart. -version: 0.0.1-dev.1 +description: Anthropic module for LangChain.dart (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.). 
+version: 0.1.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com -publish_to: none # Remove when the package is ready to be published +documentation: https://langchaindart.dev topics: - ai @@ -14,4 +13,16 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" + +dependencies: + anthropic_sdk_dart: ^0.1.0 + collection: ^1.18.0 + http: ^1.2.2 + langchain_core: 0.3.6 + langchain_tiktoken: ^1.0.1 + meta: ^1.11.0 + rxdart: ">=0.27.7 <0.29.0" + +dev_dependencies: + test: ^1.25.8 diff --git a/packages/langchain_anthropic/pubspec_overrides.yaml b/packages/langchain_anthropic/pubspec_overrides.yaml new file mode 100644 index 00000000..4d7afffa --- /dev/null +++ b/packages/langchain_anthropic/pubspec_overrides.yaml @@ -0,0 +1,6 @@ +# melos_managed_dependency_overrides: anthropic_sdk_dart,langchain_core +dependency_overrides: + anthropic_sdk_dart: + path: ../anthropic_sdk_dart + langchain_core: + path: ../langchain_core diff --git a/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg b/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg new file mode 100644 index 00000000..62d7ca92 Binary files /dev/null and b/packages/langchain_anthropic/test/chat_models/assets/apple.jpeg differ diff --git a/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart new file mode 100644 index 00000000..6023d581 --- /dev/null +++ b/packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart @@ -0,0 +1,293 @@ +// ignore_for_file: avoid_redundant_argument_values, avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_anthropic/langchain_anthropic.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:test/test.dart'; + +void main() { + group('ChatAnthropic tests', () { + const defaultModel = 'claude-3-5-sonnet-20240620'; + + late ChatAnthropic chatModel; + + setUp(() async { + chatModel = ChatAnthropic( + apiKey: Platform.environment['ANTHROPIC_API_KEY'], + defaultOptions: const ChatAnthropicOptions( + model: defaultModel, + ), + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test Text-only input with different models', () async { + final models = [ + 'claude-3-5-sonnet-20240620', + 'claude-3-haiku-20240307', + 'claude-3-opus-20240229', + 'claude-3-sonnet-20240229', + ]; + for (final model in models) { + print('Testing model: $model'); + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + options: ChatAnthropicOptions( + model: model, + temperature: 0, + ), + ); + expect(res.id, isNotEmpty); + expect(res.finishReason, isNot(FinishReason.unspecified)); + expect(res.metadata['model'], contains(model.toLowerCase())); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + await Future.delayed(const Duration(seconds: 5)); + } + }); + + test('Text-and-image input', () async { + 
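+      // Read the bundled apple.jpeg test asset and send it base64-encoded
+      // as multimodal image content alongside the text prompt.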
final res = await chatModel.invoke( + PromptValue.chat([ + ChatMessage.human( + ChatMessageContent.multiModal([ + ChatMessageContent.text('What fruit is this?'), + ChatMessageContent.image( + mimeType: 'image/jpeg', + data: base64.encode( + await File('./test/chat_models/assets/apple.jpeg') + .readAsBytes(), + ), + ), + ]), + ), + ]), + ); + + expect(res.output.content.toLowerCase(), contains('apple')); + }); + + test('Test stop sequence', () async { + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + options: const ChatAnthropicOptions( + model: defaultModel, + stopSequences: ['4'], + ), + ); + final text = res.output.content; + expect(text, contains('123')); + expect(text, isNot(contains('456789'))); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test max tokens', () async { + final res = await chatModel.invoke( + PromptValue.string('Tell me a joke'), + options: const ChatAnthropicOptions( + model: defaultModel, + maxTokens: 10, + ), + ); + expect(res.output.content.length, lessThan(50)); + expect(res.finishReason, FinishReason.length); + }); + + test('Test Multi-turn conversations', () async { + final prompt = PromptValue.chat([ + ChatMessage.humanText( + 'List the numbers from 1 to 9 in order ' + 'without any spaces, commas or additional explanations.', + ), + ChatMessage.ai('123456789'), + ChatMessage.humanText( + 'Remove the number 4 from the list', + ), + ]); + final res = await chatModel.invoke( + prompt, + options: const ChatAnthropicOptions( + model: defaultModel, + temperature: 0, + ), + ); + expect( + res.output.content, + contains('12356789'), + ); + }); + + test('Test streaming', () async { + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 100 in order ' + 'without any spaces, commas or additional explanations.', + ), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content; + count++; + } + expect(count, greaterThan(1)); + expect(content, contains('123456789')); + }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + final model = chatModel.bind( + const ChatAnthropicOptions( + model: defaultModel, + tools: [tool], + ), + ); + + final humanMessage = ChatMessage.humanText( + "What's the weather like in Boston and Madrid right now in celsius?", + ); + final res1 = await model.invoke(PromptValue.chat([humanMessage])); + + final aiMessage1 = res1.output; + expect(aiMessage1.toolCalls, hasLength(2)); + + final toolCall1 = aiMessage1.toolCalls.first; + expect(toolCall1.name, tool.name); + expect(toolCall1.arguments.containsKey('location'), isTrue); + expect(toolCall1.arguments['location'], contains('Boston')); + expect(toolCall1.arguments['unit'], 'celsius'); + + final toolCall2 = aiMessage1.toolCalls.last; + expect(toolCall2.name, tool.name); + expect(toolCall2.arguments.containsKey('location'), isTrue); + expect(toolCall2.arguments['location'], contains('Madrid')); + expect(toolCall2.arguments['unit'], 'celsius'); + + final functionResult1 = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage1 = ChatMessage.tool( + toolCallId: toolCall1.id, + content: json.encode(functionResult1), + ); + + final functionResult2 = { + 'temperature': '25', + 'unit': 'celsius', + 'description': 'Cloudy', + }; + final functionMessage2 = ChatMessage.tool( + toolCallId: toolCall2.id, + content: json.encode(functionResult2), + ); + + final res2 = await model.invoke( + PromptValue.chat([ + humanMessage, + aiMessage1, + functionMessage1, + functionMessage2, + ]), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + expect(aiMessage2.content, contains('25')); + }); + + test('Test streaming with tools', + timeout: const Timeout(Duration(minutes: 5)), () async { + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['location', 'punchline'], + }, + ); + + final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', + ); + final chat = chatModel.bind( + ChatAnthropicOptions( + model: defaultModel, + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), + ); + final jsonOutputParser = ToolsOutputParser(); + + final chain = promptTemplate.pipe(chat).pipe(jsonOutputParser); + + final stream = chain.stream({'foo': 'bears'}); + + List lastResult = []; + int count = 0; + await for (final res in stream) { + print(res); + lastResult = res; + count++; + } + + expect(count, greaterThan(1)); + expect(lastResult, hasLength(1)); + final toolCall = lastResult.first; + expect(toolCall.arguments['setup'], isNotEmpty); + expect(toolCall.arguments['punchline'], isNotEmpty); + }); + }); +} diff --git a/packages/langchain_chroma/CHANGELOG.md b/packages/langchain_chroma/CHANGELOG.md index 266080ac..7e458f37 100644 --- a/packages/langchain_chroma/CHANGELOG.md +++ b/packages/langchain_chroma/CHANGELOG.md @@ -1,3 +1,27 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart 
Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1+3 + + - Update a dependency to the latest release. + +## 0.2.1+2 + + - Update a dependency to the latest release. + +## 0.2.1+1 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.2.1 + + - Update a dependency to the latest release. + +## 0.2.0+5 + + - Update a dependency to the latest release. + ## 0.2.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart index 9ed252ba..dc170896 100644 --- a/packages/langchain_chroma/lib/src/vector_stores/chroma.dart +++ b/packages/langchain_chroma/lib/src/vector_stores/chroma.dart @@ -42,7 +42,7 @@ import 'types.dart'; /// If you are interacting with Chroma server from a web browser, /// you may need to configure the CORS policy. You can do this by /// passing the following environment variable: -/// ``` +/// ```sh /// docker run -p 8000:8000 -e 'CHROMA_SERVER_CORS_ALLOW_ORIGINS=["*"]' chromadb/chroma /// ``` /// The previous command will allow all origins to access the Chroma server diff --git a/packages/langchain_chroma/pubspec.yaml b/packages/langchain_chroma/pubspec.yaml index 3da841ef..e216f998 100644 --- a/packages/langchain_chroma/pubspec.yaml +++ b/packages/langchain_chroma/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_chroma description: LangChain.dart integration module for Chroma open-source embedding database. -version: 0.2.0+4 +version: 0.2.1+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,17 +14,17 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - chromadb: ^0.2.0 - http: ^1.1.0 - langchain_core: ^0.3.1 + chromadb: ^0.2.0+1 + http: ^1.2.2 + langchain_core: 0.3.6 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + test: ^1.25.8 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_chroma/pubspec_overrides.yaml b/packages/langchain_chroma/pubspec_overrides.yaml index 4583d481..d53c4efe 100644 --- a/packages/langchain_chroma/pubspec_overrides.yaml +++ b/packages/langchain_chroma/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: chromadb,langchain_openai,openai_dart,langchain_core,langchain_community,langchain +# melos_managed_dependency_overrides: chromadb,langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: chromadb: path: ../chromadb @@ -12,3 +12,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_cohere/pubspec.yaml b/packages/langchain_cohere/pubspec.yaml index bcb53a98..ed26abe5 100644 --- a/packages/langchain_cohere/pubspec.yaml +++ b/packages/langchain_cohere/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: 
https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_cohere issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_cohere homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_community/CHANGELOG.md b/packages/langchain_community/CHANGELOG.md index 7f48bd87..0336c13e 100644 --- a/packages/langchain_community/CHANGELOG.md +++ b/packages/langchain_community/CHANGELOG.md @@ -1,3 +1,34 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.2 + + - **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573)) + - **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839)) + +## 0.3.1 + + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +## 0.3.0 + +- **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) +- **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.2.2 + + - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c)) + +## 0.2.1+1 + + - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330)) + +## 0.2.1 + + - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8)) + + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) + ## 0.2.0+1 - Update a dependency to the latest release. 
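For readers migrating off the deprecated `OpenAIToolsAgent` (see the 0.3.1 entry above), a minimal sketch of the equivalent `ToolsAgent` wiring. It mirrors the calculator example updated later in this diff; the model choice and API key handling are illustrative, not prescriptive:

```dart
import 'package:langchain/langchain.dart';
import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final llm = ChatOpenAI(
    apiKey: 'your-openai-api-key', // illustrative; read it from the environment in practice
    defaultOptions: const ChatOpenAIOptions(temperature: 0),
  );
  // ToolsAgent works with any tool-calling chat model, replacing the
  // OpenAI-specific OpenAIToolsAgent.
  final agent = ToolsAgent.fromLLMAndTools(
    llm: llm,
    tools: [CalculatorTool()],
  );
  final executor = AgentExecutor(agent: agent);
  final res = await executor.run('What is 40 raised to the 0.43 power?');
  print(res);
}
```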
diff --git a/packages/langchain_community/README.md b/packages/langchain_community/README.md index b76ee3c3..1dcb80e3 100644 --- a/packages/langchain_community/README.md +++ b/packages/langchain_community/README.md @@ -27,6 +27,10 @@ The most popular third-party integrations have their own packages (e.g. [langcha * `WebBaseLoader`: for web pages. - Tools: * `CalculatorTool`: to calculate math expressions. + * `TavilySearchResultsTool`: returns a list of results for a query using the [Tavily](https://tavily.com) search engine. + * `TavilyAnswerTool`: returns an answer for a query using the [Tavily](https://tavily.com) search engine. +- Vector stores: + * `ObjectBoxVectorStore`: [ObjectBox](https://objectbox.io/) on-device vector database. Check out the [API reference](https://pub.dev/documentation/langchain_community/latest) for more details. diff --git a/packages/langchain_community/lib/langchain_community.dart b/packages/langchain_community/lib/langchain_community.dart index 3aee4cf9..b91a968a 100644 --- a/packages/langchain_community/lib/langchain_community.dart +++ b/packages/langchain_community/lib/langchain_community.dart @@ -3,3 +3,4 @@ library; export 'src/document_loaders/document_loaders.dart'; export 'src/tools/tools.dart'; +export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_community/lib/src/document_loaders/csv.dart b/packages/langchain_community/lib/src/document_loaders/csv.dart index 155e520d..2a4a4872 100644 --- a/packages/langchain_community/lib/src/document_loaders/csv.dart +++ b/packages/langchain_community/lib/src/document_loaders/csv.dart @@ -17,7 +17,7 @@ import 'package:langchain_core/documents.dart'; /// and [eol]. /// /// The fields are added to the page content in the following format: -/// ``` +/// ```txt /// {field1Name}: {field1Value} /// {field2Name}: {field2Value} /// ... @@ -56,7 +56,6 @@ class CsvLoader extends BaseDocumentLoader { /// the page content of the document. /// /// If not provided, all row fields are extracted. - /// ``` final List? fields; /// Optional field to override the field names from the CSV file. diff --git a/packages/langchain_community/lib/src/tools/calculator.dart b/packages/langchain_community/lib/src/tools/calculator.dart index 9f41a130..26becb93 100644 --- a/packages/langchain_community/lib/src/tools/calculator.dart +++ b/packages/langchain_community/lib/src/tools/calculator.dart @@ -14,7 +14,7 @@ import 'package:math_expressions/math_expressions.dart'; /// temperature: 0, /// ); /// final tool = CalculatorTool(); -/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? 
'); /// print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' diff --git a/packages/langchain_community/lib/src/tools/tavily/mappers.dart b/packages/langchain_community/lib/src/tools/tavily/mappers.dart new file mode 100644 index 00000000..21e907e5 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/mappers.dart @@ -0,0 +1,21 @@ +// ignore_for_file: public_member_api_docs +import 'package:tavily_dart/tavily_dart.dart'; + +import 'types.dart'; + +extension TavilySearchDepthX on TavilySearchDepth { + SearchRequestSearchDepth toSearchRequestSearchDepth() => switch (this) { + TavilySearchDepth.basic => SearchRequestSearchDepth.basic, + TavilySearchDepth.advanced => SearchRequestSearchDepth.advanced, + }; +} + +extension TavilySearchResultX on SearchResult { + TavilySearchResult toTavilySearchResult() => TavilySearchResult( + title: title, + url: url, + content: content, + rawContent: rawContent, + score: score, + ); +} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily.dart b/packages/langchain_community/lib/src/tools/tavily/tavily.dart new file mode 100644 index 00000000..64f26c5d --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily.dart @@ -0,0 +1,3 @@ +export 'tavily_answer.dart'; +export 'tavily_search_results.dart'; +export 'types.dart'; diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart new file mode 100644 index 00000000..a5ad637f --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart @@ -0,0 +1,102 @@ +import 'dart:async'; + +import 'package:http/http.dart' as http; +import 'package:langchain_core/tools.dart'; +import 'package:tavily_dart/tavily_dart.dart'; + +import 'mappers.dart'; +import 'tavily_search_results.dart'; +import 'types.dart'; + +/// Tool that queries the [Tavily Search API](https://tavily.com) and +/// gets an answer to the search query. +/// +/// The Tavily API uses API keys for authentication. Visit the +/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll +/// use in your requests. +/// +/// If you want to get a list of search results instead, use the +/// [TavilySearchResultsTool] instead. +/// +/// Example: +/// ```dart +/// final tool = TavilyAnswerTool( +/// apiKey: Platform.environment['TAVILY_API_KEY']!, +/// ); +/// final res = await tool.invoke('What is the weather like in New York?'); +/// print(res); +/// // The current weather in New York is clear with a temperature of 22.8°C (73.0°F)... +/// ``` +final class TavilyAnswerTool extends StringTool { + /// Creates a [TavilyAnswerTool] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Tavily API key. You can find your API key in the + /// [Tavily console](https://app.tavily.com/). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters (e.g. Azure OpenAI API + /// required to attach a `version` query parameter to every request). + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. 
to use a Socks5 proxy). + TavilyAnswerTool({ + required this.apiKey, + final String? baseUrl, + final Map headers = const {}, + final Map queryParams = const {}, + final http.Client? client, + super.defaultOptions = const TavilyAnswerToolOptions(), + }) : _client = TavilyClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ), + super( + name: 'tavily_answer', + description: + 'A search engine optimized for comprehensive, accurate, and trusted answers. ' + 'Useful for when you need to answer questions about current events. ' + 'The tool returns an answer to the search query - not the search results.', + inputDescription: 'The search query to get an answer to. ' + 'Eg: "What is the weather like in New York?"', + ); + + /// A client for interacting with Tavily API. + final TavilyClient _client; + + /// Your Tavily API key. + String apiKey; + + @override + Future invokeInternal( + final String toolInput, { + final TavilyAnswerToolOptions? options, + }) async { + final res = await _client.search( + request: SearchRequest( + apiKey: apiKey, + query: toolInput, + includeAnswer: true, + searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) + .toSearchRequestSearchDepth(), + maxResults: options?.maxResults ?? defaultOptions.maxResults, + includeDomains: + options?.includeDomains ?? defaultOptions.includeDomains, + excludeDomains: + options?.excludeDomains ?? defaultOptions.excludeDomains, + ), + ); + return res.answer ?? ''; + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart new file mode 100644 index 00000000..7e5693c7 --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart @@ -0,0 +1,130 @@ +import 'dart:async'; + +import 'package:http/http.dart' as http; +import 'package:langchain_core/tools.dart'; +import 'package:tavily_dart/tavily_dart.dart'; + +import 'mappers.dart'; +import 'tavily_answer.dart'; +import 'types.dart'; + +/// Tool that queries the [Tavily Search API](https://tavily.com) and +/// gets back a list of search results. +/// +/// The Tavily API uses API keys for authentication. Visit the +/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll +/// use in your requests. +/// +/// If you want to get directly an answer to a search query, use the +/// [TavilyAnswerTool] instead. 
+/// +/// Example: +/// ```dart +/// final tool = TavilySearchResultsTool( +/// apiKey: Platform.environment['TAVILY_API_KEY']!, +/// ); +/// final res = await tool.invoke('What is the weather like in New York?'); +/// print(res); +/// // [ +/// // { +/// // "title": "Weather in New York", +/// // "url": "https://www.weatherapi.com/", +/// // "content": "{'location': {'lat': 40.71, 'lon': -74.01}, 'current': {'last_updated': '2024-06-20 17:00', 'temp_c': 31.1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png'}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 161, 'wind_dir': 'SSE', 'pressure_mb': 1025.0, 'pressure_in': 30.26, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 48, 'cloud': 0, 'feelslike_c': 33.1, 'feelslike_f': 91.6, 'windchill_c': 29.5, 'windchill_f': 85.0, 'heatindex_c': 30.6, 'heatindex_f': 87.0, 'dewpoint_c': 17.7, 'dewpoint_f': 63.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 16.4, 'gust_kph': 26.4}}", +/// // "score": 0.98855 +/// // }, +/// // ... +/// // ] +/// ``` +final class TavilySearchResultsTool + extends Tool { + /// Creates a [TavilySearchResultsTool] instance. + /// + /// Main configuration options: + /// - `apiKey`: your Tavily API key. You can find your API key in the + /// [Tavily console](https://app.tavily.com/). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `headers`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `queryParams`: global query parameters to send with every request. You + /// can use this to set custom query parameters (e.g. Azure OpenAI API + /// required to attach a `version` query parameter to every request). + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + TavilySearchResultsTool({ + required this.apiKey, + final String? baseUrl, + final Map headers = const {}, + final Map queryParams = const {}, + final http.Client? client, + super.defaultOptions = const TavilySearchResultsToolOptions(), + }) : _client = TavilyClient( + baseUrl: baseUrl, + headers: headers, + queryParams: queryParams, + client: client, + ), + super( + name: 'tavily_search_results', + description: + 'A search engine optimized for comprehensive, accurate, and trusted results. ' + 'Useful for when you need to answer questions about current events. ' + 'The tool returns a JSON object with search results.', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'query': { + 'type': 'string', + 'description': 'The search query to look up. ' + 'Eg: "What is the weather like in New York?"', + }, + }, + 'required': ['query'], + }, + ); + + /// A client for interacting with Tavily API. + final TavilyClient _client; + + /// Your Tavily API key. + String apiKey; + + @override + Future invokeInternal( + final String input, { + final TavilySearchResultsToolOptions? options, + }) async { + final res = await _client.search( + request: SearchRequest( + apiKey: apiKey, + query: input, + searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth) + .toSearchRequestSearchDepth(), + maxResults: options?.maxResults ?? defaultOptions.maxResults, + includeRawContent: + options?.includeRawContent ?? defaultOptions.includeRawContent, + includeDomains: + options?.includeDomains ?? 
defaultOptions.includeDomains, + excludeDomains: + options?.excludeDomains ?? defaultOptions.excludeDomains, + ), + ); + return TavilySearchResults( + results: res.results + .map((r) => r.toTavilySearchResult()) + .toList(growable: false), + ); + } + + @override + String getInputFromJson(final Map json) { + return json['query'] as String; + } + + @override + void close() { + _client.endSession(); + } +} diff --git a/packages/langchain_community/lib/src/tools/tavily/types.dart b/packages/langchain_community/lib/src/tools/tavily/types.dart new file mode 100644 index 00000000..872723cf --- /dev/null +++ b/packages/langchain_community/lib/src/tools/tavily/types.dart @@ -0,0 +1,181 @@ +import 'dart:convert'; + +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; + +import 'tavily_answer.dart'; +import 'tavily_search_results.dart'; + +/// The depth of the search. +enum TavilySearchDepth { + /// Basic search depth. + basic, + + /// Advanced search depth. + advanced, +} + +/// {@template tavily_search_results} +/// A search results from the Tavily search engine. +/// {@endtemplate} +@immutable +class TavilySearchResults { + /// {@macro tavily_search_results} + const TavilySearchResults({ + required this.results, + }); + + /// The search results. + final List results; + + @override + String toString() { + return json.encode( + results + .map( + (result) => { + 'title': result.title, + 'url': result.url, + 'content': result.content, + 'rawContent': result.rawContent, + 'score': result.score, + }, + ) + .toList(growable: false), + ); + } +} + +/// {@template tavily_search_result} +/// A search result from the Tavily search engine. +/// {@endtemplate} +@immutable +class TavilySearchResult { + /// {@macro tavily_search_result} + const TavilySearchResult({ + required this.title, + required this.url, + required this.content, + this.rawContent, + required this.score, + }); + + /// The title of the search result url. + final String title; + + /// The url of the search result. + final String url; + + /// The most query related content from the scraped url. + final String content; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + final String? rawContent; + + /// The relevance score of the search result. + final double score; +} + +/// {@template tavily_search_results_tool_options} +/// Generation options to pass into the [TavilySearchResultsTool]. +/// {@endtemplate} +class TavilySearchResultsToolOptions extends ToolOptions { + /// {@macro tavily_search_results_tool_options} + const TavilySearchResultsToolOptions({ + this.maxResults = 5, + this.searchDepth = TavilySearchDepth.basic, + this.includeRawContent = false, + this.includeDomains, + this.excludeDomains, + }); + + /// The number of maximum search results to return. + final int maxResults; + + /// The depth of the search. + final TavilySearchDepth searchDepth; + + /// Include raw content in the search results. + final bool includeRawContent; + + /// A list of domains to specifically include in the search results. + final List? includeDomains; + + /// A list of domains to specifically exclude from the search results. + final List? excludeDomains; +} + +/// {@template tavily_answer_tool_options} +/// Generation options to pass into the [TavilyAnswerTool]. 
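+///
+/// For illustration, a possible configuration (the values are examples;
+/// the defaults are `maxResults: 5` and `TavilySearchDepth.basic`):
+/// ```dart
+/// const options = TavilyAnswerToolOptions(
+///   maxResults: 3,
+///   searchDepth: TavilySearchDepth.advanced,
+///   excludeDomains: ['example.com'],
+/// );
+/// ```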
+/// {@endtemplate} +@immutable +class TavilyAnswerToolOptions extends ToolOptions { + /// {@macro tavily_answer_tool_options} + const TavilyAnswerToolOptions({ + this.maxResults = 5, + this.searchDepth = TavilySearchDepth.basic, + this.includeDomains, + this.excludeDomains, + super.concurrencyLimit, + }); + + /// The number of maximum search results to return. + final int maxResults; + + /// The depth of the search. + final TavilySearchDepth searchDepth; + + /// A list of domains to specifically include in the search results. + final List? includeDomains; + + /// A list of domains to specifically exclude from the search results. + final List? excludeDomains; + + @override + TavilyAnswerToolOptions copyWith({ + final int? maxResults, + final TavilySearchDepth? searchDepth, + final List? includeDomains, + final List? excludeDomains, + final int? concurrencyLimit, + }) { + return TavilyAnswerToolOptions( + maxResults: maxResults ?? this.maxResults, + searchDepth: searchDepth ?? this.searchDepth, + includeDomains: includeDomains ?? this.includeDomains, + excludeDomains: excludeDomains ?? this.excludeDomains, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + TavilyAnswerToolOptions merge( + covariant final TavilyAnswerToolOptions? other, + ) { + return copyWith( + maxResults: other?.maxResults, + searchDepth: other?.searchDepth, + includeDomains: other?.includeDomains, + excludeDomains: other?.excludeDomains, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final TavilyAnswerToolOptions other) { + return maxResults == other.maxResults && + searchDepth == other.searchDepth && + includeDomains == other.includeDomains && + excludeDomains == other.excludeDomains && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return maxResults.hashCode ^ + searchDepth.hashCode ^ + includeDomains.hashCode ^ + excludeDomains.hashCode ^ + concurrencyLimit.hashCode; + } +} diff --git a/packages/langchain_community/lib/src/tools/tools.dart b/packages/langchain_community/lib/src/tools/tools.dart index 9601880a..4aa306f8 100644 --- a/packages/langchain_community/lib/src/tools/tools.dart +++ b/packages/langchain_community/lib/src/tools/tools.dart @@ -1 +1,2 @@ export 'calculator.dart'; +export 'tavily/tavily.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart new file mode 100644 index 00000000..84658107 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart @@ -0,0 +1,220 @@ +import 'dart:convert'; + +import 'package:langchain_core/documents.dart'; +import 'package:langchain_core/vector_stores.dart'; +import 'package:objectbox/objectbox.dart' + show + Box, + Condition, + ObjectWithScore, + QueryHnswProperty, + QueryStringProperty; +import 'package:uuid/uuid.dart'; + +/// {@template base_object_box_vector_store} +/// Base class for ObjectBox vector store. +/// +/// The [ObjectBoxVectorStore] class is a pre-configured version of this class, +/// but it can only be used if you don't use ObjectBox for anything else. +/// +/// If you need more control over the ObjectBox store, use this class instead. +/// For example, if you are using ObjectBox to store other entities, or if you +/// need to customize the Document entity class. 
+/// +/// Here is an example of how to use this class: +/// +/// First, you can define our own Document entity class instead of using the +/// one provided by the [ObjectBoxVectorStore]. In this way, you can customize +/// the entity to your needs. You will need to define the mapping logic between +/// the entity and the LangChain [Document] model. +/// +/// ```dart +/// @Entity() +/// class MyDocumentEntity { +/// MyDocumentEntity({ +/// required this.id, +/// required this.content, +/// required this.metadata, +/// required this.embedding, +/// }); +/// +/// @Id() +/// int internalId = 0; +/// +/// @Unique(onConflict: ConflictStrategy.replace) +/// String id; +/// +/// String content; +/// +/// String metadata; +/// +/// @HnswIndex( +/// dimensions: 768, +/// distanceType: VectorDistanceType.cosine, +/// ) +/// @Property(type: PropertyType.floatVector) +/// List embedding; +/// +/// factory MyDocumentEntity.fromModel( +/// Document doc, List embedding, +/// ) => MyDocumentEntity( +/// id: doc.id ?? '', +/// content: doc.pageContent, +/// metadata: jsonEncode(doc.metadata), +/// embedding: embedding, +/// ); +/// +/// Document toModel() => Document( +/// id: id, +/// pageContent: content, +/// metadata: jsonDecode(metadata), +/// ); +/// } +/// ``` +/// +/// After defining the entity class, you will need to run the ObjectBox +/// generator: +/// +/// ```sh +/// dart run build_runner build --delete-conflicting-outputs +/// ``` +/// +/// Then, you just need to create your custom vector store class that +/// extends [BaseObjectBoxVectorStore] and wire everything up: +/// +/// ```dart +/// class MyCustomVectorStore extends BaseObjectBoxVectorStore { +/// MyCustomVectorStore({ +/// required super.embeddings, +/// required Store store, +/// }) : super( +/// box: store.box(), +/// createEntity: ( +/// String id, +/// String content, +/// String metadata, +/// List embedding, +/// ) => +/// MyDocumentEntity( +/// id: id, +/// content: content, +/// metadata: metadata, +/// embedding: embedding, +/// ), +/// createDocument: (MyDocumentEntity docDto) => docDto.toModel(), +/// getIdProperty: () => MyDocumentEntity_.id, +/// getEmbeddingProperty: () => MyDocumentEntity_.embedding, +/// ); +/// } +/// ``` +/// +/// Now you can use the [MyCustomVectorStore] class to store and search documents. +/// {@endtemplate} +class BaseObjectBoxVectorStore extends VectorStore { + /// {@macro base_object_box_vector_store} + BaseObjectBoxVectorStore({ + required super.embeddings, + required final Box box, + required final T Function( + String id, + String content, + String metadata, + List embedding, + ) createEntity, + required final Document Function(T) createDocument, + required final QueryStringProperty Function() getIdProperty, + required final QueryHnswProperty Function() getEmbeddingProperty, + }) : _box = box, + _createEntity = createEntity, + _createDocument = createDocument, + _getIdProperty = getIdProperty, + _getEmbeddingProperty = getEmbeddingProperty; + + /// The [Box] to store the entities in. + final Box _box; + + /// The function to create an entity [T] from the given data. + final T Function( + String id, + String content, + String metadata, + List embedding, + ) _createEntity; + + /// The function to create a [Document] from the given entity [T]. + final Document Function(T) _createDocument; + + /// A getter for the ID query property. + final QueryStringProperty Function() _getIdProperty; + + /// A getter for the embedding query property. 
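+  /// For the custom entity shown in the class example above, this would
+  /// typically be `() => MyDocumentEntity_.embedding`.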
+ final QueryHnswProperty Function() _getEmbeddingProperty; + + /// UUID generator. + final Uuid _uuid = const Uuid(); + + @override + Future> addVectors({ + required final List> vectors, + required final List documents, + }) async { + assert(vectors.length == documents.length); + + final List ids = []; + final List records = []; + for (var i = 0; i < documents.length; i++) { + final doc = documents[i]; + final id = doc.id ?? _uuid.v4(); + final entity = _createEntity( + id, + doc.pageContent, + jsonEncode(doc.metadata), + vectors[i], + ); + ids.add(id); + records.add(entity); + } + + _box.putMany(records); + return ids; + } + + @override + Future delete({required final List ids}) { + return _box.query(_getIdProperty().oneOf(ids)).build().removeAsync(); + } + + /// Delete by condition. + /// + /// - [condition] is the condition to delete by. + Future deleteWhere(final Condition condition) { + return _box.query(condition).build().removeAsync(); + } + + @override + Future> similaritySearchByVectorWithScores({ + required final List embedding, + final VectorStoreSimilaritySearch config = + const VectorStoreSimilaritySearch(), + }) async { + var filter = + _getEmbeddingProperty().nearestNeighborsF32(embedding, config.k); + + final filterCondition = config.filter?.values.firstOrNull; + if (filterCondition != null && filterCondition is Condition) { + filter = filter.and(filterCondition); + } + + final query = _box.query(filter).build(); + + Iterable> results = query.findWithScores(); + + if (config.scoreThreshold != null) { + results = results.where((final r) => r.score >= config.scoreThreshold!); + } + + return results + .map((r) => (_createDocument(r.object), r.score)) + .toList(growable: false); + } +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart new file mode 100644 index 00000000..308e7da0 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart @@ -0,0 +1,40 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'package:langchain_core/documents.dart'; +import 'package:langchain_core/vector_stores.dart'; + +// This is a stub class +class BaseObjectBoxVectorStore extends VectorStore { + BaseObjectBoxVectorStore({ + required super.embeddings, + required final Object? box, + required final Object? createEntity, + required final Object? createDocument, + required final Object? getIdProperty, + required final Object? 
getEmbeddingProperty, + }); + + @override + Future> addVectors({ + required List> vectors, + required List documents, + }) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + @override + Future delete({required List ids}) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + Future deleteWhere(final Object condition) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } + + @override + Future> similaritySearchByVectorWithScores({ + required List embedding, + VectorStoreSimilaritySearch config = const VectorStoreSimilaritySearch(), + }) { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart new file mode 100644 index 00000000..63b1f86d --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart @@ -0,0 +1,7 @@ +export 'ob_io.dart' if (dart.library.js_interop) 'ob_stub.dart' + show + BaseObjectBoxVectorStore, + ObjectBoxDocument, + ObjectBoxDocumentProps, + ObjectBoxSimilaritySearch, + ObjectBoxVectorStore; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart new file mode 100644 index 00000000..db6546e3 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart @@ -0,0 +1,3 @@ +export 'base_objectbox.dart'; +export 'objectbox.dart'; +export 'types.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart new file mode 100644 index 00000000..87329806 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart @@ -0,0 +1,3 @@ +export 'base_objectbox_stub.dart'; +export 'objectbox_stub.dart'; +export 'types_stub.dart'; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json new file mode 100644 index 00000000..32251c2e --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json @@ -0,0 +1,56 @@ +{ + "_note1": "KEEP THIS FILE! Check it into a version control system (VCS) like git.", + "_note2": "ObjectBox manages crucial IDs for your object model. 
See docs for details.", + "_note3": "If you have VCS merge conflicts, you must resolve them according to ObjectBox docs.", + "entities": [ + { + "id": "1:4662034750769022750", + "lastPropertyId": "5:5762998900965066008", + "name": "ObjectBoxDocument", + "properties": [ + { + "id": "1:328437667364158177", + "name": "internalId", + "type": 6, + "flags": 1 + }, + { + "id": "2:3766173764062654800", + "name": "id", + "type": 9, + "flags": 34848, + "indexId": "1:8818474670164842374" + }, + { + "id": "3:7972539540824041325", + "name": "content", + "type": 9 + }, + { + "id": "4:866532944790310363", + "name": "metadata", + "type": 9 + }, + { + "id": "5:5762998900965066008", + "name": "embedding", + "type": 28, + "flags": 8, + "indexId": "2:3016727589204567263" + } + ], + "relations": [] + } + ], + "lastEntityId": "1:4662034750769022750", + "lastIndexId": "2:3016727589204567263", + "lastRelationId": "0:0", + "lastSequenceId": "0:0", + "modelVersion": 5, + "modelVersionParserMinimum": 5, + "retiredEntityUids": [], + "retiredIndexUids": [], + "retiredPropertyUids": [], + "retiredRelationUids": [], + "version": 1 +} \ No newline at end of file diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart new file mode 100644 index 00000000..22ddeee4 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart @@ -0,0 +1,196 @@ +import 'dart:convert'; + +import 'package:langchain_core/documents.dart'; +import 'package:objectbox/objectbox.dart' + show + Condition, + ConflictStrategy, + Entity, + HnswIndex, + Id, + Property, + PropertyType, + Store, + Unique; + +import 'base_objectbox.dart'; +import 'objectbox.g.dart' as obxg; +import 'types.dart'; + +/// Vector store for the [ObjectBox](https://objectbox.io/) on-device database. +/// +/// ```dart +/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); +/// final vectorStore = ObjectBoxVectorStore(embeddings: embeddings); +/// ``` +/// +/// This vector stores creates a [Store] with an [ObjectBoxDocument] entity +/// that persists LangChain [Document]s along with their embeddings. If you +/// need more control over the entity or the storeo, you can use the +/// [BaseObjectBoxVectorStore] class instead. +/// +/// See documentation for more details: +/// - [LangChain.dart ObjectBox docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/objectbox) +/// - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search) +/// +/// ### Filtering +/// +/// You can use the [ObjectBoxSimilaritySearch] class to pass ObjectBox-specific +/// filtering options. +/// +/// [ObjectBoxVectorStore] supports filtering queries by id, content or metadata +/// using ObjectBox's [Condition]. You can define the filter condition in the +/// [ObjectBoxSimilaritySearch] `filterCondition` parameter. Use the +/// [ObjectBoxDocumentProps] class to reference the entity fields to use in the +/// query. 
+/// +/// For example: +/// ```dart +/// final vectorStore = ObjectBoxVectorStore(...); +/// final res = await vectorStore.similaritySearch( +/// query: 'What should I feed my cat?', +/// config: ObjectBoxSimilaritySearch( +/// k: 5, +/// scoreThreshold: 0.8, +/// filterCondition: ObjectBoxDocumentProps.id.equals('my-id') +/// .or(ObjectBoxDocumentProps.metadata.contains('some-text')), +/// ), +/// ); +/// ``` +class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { + /// Creates an [ObjectBoxVectorStore] instance. + /// + /// Main configuration options: + /// - [embeddings] The embeddings model to use. + /// - [dimensions] The number of dimensions of the embeddings (vector size). + /// + /// ObjectBox-specific options: + /// - Check the ObjectBox's [Store] documentation for more details on the + /// different options. + ObjectBoxVectorStore({ + required super.embeddings, + required final int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) : super( + box: _openStore( + dimensions: dimensions, + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup, + ).box(), + createEntity: _createObjectBoxDocument, + createDocument: _createDoc, + getIdProperty: () => obxg.ObjectBoxDocument_.id, + getEmbeddingProperty: () => obxg.ObjectBoxDocument_.embedding, + ); + + /// The ObjectBox store. + static Store? _store; + + /// Opens the ObjectBox store. + static Store _openStore({ + required final int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) { + return _store ??= obxg.openStore( + dimensions: dimensions, + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup, + ); + } + + /// Creates an [ObjectBoxDocument] entity. + static ObjectBoxDocument _createObjectBoxDocument( + String id, + String content, + String metadata, + List embedding, + ) => + ObjectBoxDocument(0, id, content, metadata, embedding); + + /// Creates a [Document] from an [ObjectBoxDocument] entity. + static Document _createDoc(ObjectBoxDocument entity) { + Map metadata = const {}; + try { + metadata = jsonDecode(entity.metadata); + } catch (_) {} + return Document( + id: entity.id, + pageContent: entity.content, + metadata: metadata, + ); + } + + /// Closes the ObjectBox store; + /// + /// Don't try to call any other methods after the store is closed. + void close() { + _store?.close(); + _store = null; + } +} + +/// {@template objectbox_document} +/// The ObjectBox entity representing a LangChain [Document]. +/// {@endtemplate} +@Entity() +class ObjectBoxDocument { + /// {@macro objectbox_document} + ObjectBoxDocument( + this.internalId, + this.id, + this.content, + this.metadata, + this.embedding, + ); + + /// The internal ID used by ObjectBox. + @Id() + int internalId = 0; + + /// The ID of the document. 
+ @Unique(onConflict: ConflictStrategy.replace) + String id; + + /// The content of the document. + String content; + + /// The metadata of the document. + String metadata; + + /// The embedding of the document. + @HnswIndex(dimensions: 0) // Set dynamically in the ObjectBoxVectorStore + @Property(type: PropertyType.floatVector) + List embedding; +} + +/// [ObjectBoxDocument] entity fields to define ObjectBox queries. +/// +/// Example: +/// ```dart +/// final filterCondition = ObjectBoxDocumentProps.metadata +/// .contains('animal') +/// .or(ObjectBoxDocumentProps.metadata.contains('natural'); +/// ``` +typedef ObjectBoxDocumentProps = obxg.ObjectBoxDocument_; diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart new file mode 100644 index 00000000..4eed33be --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart @@ -0,0 +1,193 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND +// This code was generated by ObjectBox. To update it run the generator again +// with `dart run build_runner build`. +// See also https://docs.objectbox.io/getting-started#generate-objectbox-code + +// ignore_for_file: camel_case_types, depend_on_referenced_packages, avoid_js_rounded_ints, require_trailing_commas, cascade_invocations, strict_raw_type +// coverage:ignore-file + +import 'dart:typed_data'; + +import 'package:flat_buffers/flat_buffers.dart' as fb; +import 'package:objectbox/internal.dart' + as obx_int; // generated code can access "internal" functionality +import 'package:objectbox/objectbox.dart' as obx; + +import '../../../src/vector_stores/objectbox/objectbox.dart'; + +export 'package:objectbox/objectbox.dart'; // so that callers only have to import this file + +List? _entities; + +List _getEntities(int dimensions) { + if (_entities != null) { + final objectBoxDocumentEntity = _entities![0]; + final embeddingProperty = objectBoxDocumentEntity.properties[4]; + + if (embeddingProperty.hnswParams?.dimensions != dimensions) { + _entities = null; + } else { + return _entities!; + } + } + + return _entities ??= [ + obx_int.ModelEntity( + id: const obx_int.IdUid(1, 4662034750769022750), + name: 'ObjectBoxDocument', + lastPropertyId: const obx_int.IdUid(5, 5762998900965066008), + flags: 0, + properties: [ + obx_int.ModelProperty( + id: const obx_int.IdUid(1, 328437667364158177), + name: 'internalId', + type: 6, + flags: 1), + obx_int.ModelProperty( + id: const obx_int.IdUid(2, 3766173764062654800), + name: 'id', + type: 9, + flags: 34848, + indexId: const obx_int.IdUid(1, 8818474670164842374)), + obx_int.ModelProperty( + id: const obx_int.IdUid(3, 7972539540824041325), + name: 'content', + type: 9, + flags: 0), + obx_int.ModelProperty( + id: const obx_int.IdUid(4, 866532944790310363), + name: 'metadata', + type: 9, + flags: 0), + obx_int.ModelProperty( + id: const obx_int.IdUid(5, 5762998900965066008), + name: 'embedding', + type: 28, + flags: 8, + indexId: const obx_int.IdUid(2, 3016727589204567263), + hnswParams: obx_int.ModelHnswParams( + dimensions: dimensions, + )) + ], + relations: [], + backlinks: []) + ]; +} + +/// Shortcut for [obx.Store.new] that passes [getObjectBoxModel] and for Flutter +/// apps by default a [directory] using `defaultStoreDirectory()` from the +/// ObjectBox Flutter library. +/// +/// Note: for desktop apps it is recommended to specify a unique [directory]. +/// +/// See [obx.Store.new] for an explanation of all parameters. 
+/// +/// For Flutter apps, also calls `loadObjectBoxLibraryAndroidCompat()` from +/// the ObjectBox Flutter library to fix loading the native ObjectBox library +/// on Android 6 and older. +obx.Store openStore( + {required int dimensions, + String? directory, + int? maxDBSizeInKB, + int? maxDataSizeInKB, + int? fileMode, + int? maxReaders, + bool queriesCaseSensitiveDefault = true, + String? macosApplicationGroup}) { + return obx.Store(getObjectBoxModel(dimensions), + directory: directory, + maxDBSizeInKB: maxDBSizeInKB, + maxDataSizeInKB: maxDataSizeInKB, + fileMode: fileMode, + maxReaders: maxReaders, + queriesCaseSensitiveDefault: queriesCaseSensitiveDefault, + macosApplicationGroup: macosApplicationGroup); +} + +/// Returns the ObjectBox model definition for this project for use with +/// [obx.Store.new]. +obx_int.ModelDefinition getObjectBoxModel(int dimensions) { + final entities = _getEntities(dimensions); + final model = obx_int.ModelInfo( + entities: _getEntities(dimensions), + lastEntityId: const obx_int.IdUid(1, 4662034750769022750), + lastIndexId: const obx_int.IdUid(2, 3016727589204567263), + lastRelationId: const obx_int.IdUid(0, 0), + lastSequenceId: const obx_int.IdUid(0, 0), + retiredEntityUids: const [], + retiredIndexUids: const [], + retiredPropertyUids: const [], + retiredRelationUids: const [], + modelVersion: 5, + modelVersionParserMinimum: 5, + version: 1); + + final bindings = { + ObjectBoxDocument: obx_int.EntityDefinition( + model: entities[0], + toOneRelations: (ObjectBoxDocument object) => [], + toManyRelations: (ObjectBoxDocument object) => {}, + getId: (ObjectBoxDocument object) => object.internalId, + setId: (ObjectBoxDocument object, int id) { + object.internalId = id; + }, + objectToFB: (ObjectBoxDocument object, fb.Builder fbb) { + final idOffset = fbb.writeString(object.id); + final contentOffset = fbb.writeString(object.content); + final metadataOffset = fbb.writeString(object.metadata); + final embeddingOffset = fbb.writeListFloat32(object.embedding); + fbb.startTable(6); + fbb.addInt64(0, object.internalId); + fbb.addOffset(1, idOffset); + fbb.addOffset(2, contentOffset); + fbb.addOffset(3, metadataOffset); + fbb.addOffset(4, embeddingOffset); + fbb.finish(fbb.endTable()); + return object.internalId; + }, + objectFromFB: (obx.Store store, ByteData fbData) { + final buffer = fb.BufferContext(fbData); + final rootOffset = buffer.derefObject(0); + final internalIdParam = + const fb.Int64Reader().vTableGet(buffer, rootOffset, 4, 0); + final idParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 6, ''); + final contentParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 8, ''); + final metadataParam = const fb.StringReader(asciiOptimization: true) + .vTableGet(buffer, rootOffset, 10, ''); + final embeddingParam = + const fb.ListReader(fb.Float32Reader(), lazy: false) + .vTableGet(buffer, rootOffset, 12, []); + final object = ObjectBoxDocument(internalIdParam, idParam, + contentParam, metadataParam, embeddingParam); + + return object; + }) + }; + + return obx_int.ModelDefinition(model, bindings); +} + +/// [ObjectBoxDocument] entity fields to define ObjectBox queries. +class ObjectBoxDocument_ { + /// See [ObjectBoxDocument.internalId]. + static final internalId = + obx.QueryIntegerProperty(_entities![0].properties[0]); + + /// See [ObjectBoxDocument.id]. + static final id = + obx.QueryStringProperty(_entities![0].properties[1]); + + /// See [ObjectBoxDocument.content]. 
+ static final content = + obx.QueryStringProperty(_entities![0].properties[2]); + + /// See [ObjectBoxDocument.metadata]. + static final metadata = + obx.QueryStringProperty(_entities![0].properties[3]); + + /// See [ObjectBoxDocument.embedding]. + static final embedding = + obx.QueryHnswProperty(_entities![0].properties[4]); +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart new file mode 100644 index 00000000..7763f9cf --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart @@ -0,0 +1,53 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'base_objectbox_stub.dart'; + +// This is a stub class +class ObjectBoxVectorStore extends BaseObjectBoxVectorStore { + ObjectBoxVectorStore({ + required super.embeddings, + required int dimensions, + final String? directory, + final int? maxDBSizeInKB, + final int? maxDataSizeInKB, + final int? fileMode, + final int? maxReaders, + final bool queriesCaseSensitiveDefault = true, + final String? macosApplicationGroup, + }) : super( + box: null, + createEntity: null, + createDocument: null, + getIdProperty: null, + getEmbeddingProperty: null, + ); + + void close() { + throw UnsupportedError('ObjectBox is not supported on web platform.'); + } +} + +// This is a stub class +class ObjectBoxDocument { + ObjectBoxDocument( + this.internalId, + this.id, + this.content, + this.metadata, + this.embedding, + ); + + int internalId = 0; + String id; + String content; + String metadata; + List embedding; +} + +// This is a stub class +class ObjectBoxDocumentProps { + static const internalId = null; + static const id = null; + static const content = null; + static const metadata = null; + static const embedding = null; +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart new file mode 100644 index 00000000..aaa08078 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/types.dart @@ -0,0 +1,29 @@ +import 'package:langchain_core/vector_stores.dart'; +import 'package:objectbox/objectbox.dart' show Condition; + +/// {@template objectbox_similarity_search} +/// ObjectBox similarity search config. +/// +/// ObjectBox supports filtering queries by id, content or metadata using +/// [Condition]. You can define the filter condition in the [filterCondition] +/// parameter. +/// +/// Example: +/// ```dart +/// ObjectBoxSimilaritySearch( +/// k: 10, +/// scoreThreshold: 1.3, +/// filterCondition: ObjectBoxDocumentProps.metadata.contains('cat'), +/// ); +/// ``` +/// {@endtemplate} +class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { + /// {@macro objectbox_similarity_search} + ObjectBoxSimilaritySearch({ + super.k = 4, + super.scoreThreshold, + final Condition? filterCondition, + }) : super( + filter: filterCondition != null ? 
{'filter': filterCondition} : null, + ); +} diff --git a/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart new file mode 100644 index 00000000..4b1aa144 --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart @@ -0,0 +1,11 @@ +// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters +import 'package:langchain_core/vector_stores.dart'; + +// This is a stub class +class ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch { + ObjectBoxSimilaritySearch({ + super.k = 0, + super.scoreThreshold, + Object? filterCondition, + }) : super(filter: null); +} diff --git a/packages/langchain_community/lib/src/vector_stores/vector_stores.dart b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart new file mode 100644 index 00000000..d9da952b --- /dev/null +++ b/packages/langchain_community/lib/src/vector_stores/vector_stores.dart @@ -0,0 +1 @@ +export 'objectbox/ob.dart'; diff --git a/packages/langchain_community/pubspec.yaml b/packages/langchain_community/pubspec.yaml index 29fbdb15..9fd5f428 100644 --- a/packages/langchain_community/pubspec.yaml +++ b/packages/langchain_community/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_community description: LangChain.dart third-party integrations that don't have a dedicated package. -version: 0.2.0+1 +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -13,17 +13,27 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: beautiful_soup_dart: ^0.3.0 - cross_file: ^0.3.4+1 + cross_file: ^0.3.4+2 csv: ^6.0.0 - http: ^1.1.0 - json_path: ^0.7.1 - langchain_core: ^0.3.1 - math_expressions: ^2.4.0 + flat_buffers: ^23.5.26 + http: ^1.2.2 + json_path: ^0.7.4 + langchain_core: 0.3.6 + math_expressions: ^2.6.0 meta: ^1.11.0 + objectbox: ^4.0.1 + tavily_dart: ^0.1.0 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + build_runner: ^2.4.11 + langchain_openai: ^0.7.2 + objectbox_generator: ^4.0.1 + test: ^1.25.8 + +objectbox: + output_dir: src/vector_stores/objectbox diff --git a/packages/langchain_community/pubspec_overrides.yaml b/packages/langchain_community/pubspec_overrides.yaml index 3508ed77..19febce5 100644 --- a/packages/langchain_community/pubspec_overrides.yaml +++ b/packages/langchain_community/pubspec_overrides.yaml @@ -1,4 +1,10 @@ -# melos_managed_dependency_overrides: langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain_core: path: ../langchain_core + langchain_openai: + path: ../langchain_openai + openai_dart: + path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_community/test/tools/tavily_test.dart b/packages/langchain_community/test/tools/tavily_test.dart new file mode 100644 index 00000000..85214c6c --- /dev/null +++ b/packages/langchain_community/test/tools/tavily_test.dart @@ -0,0 +1,31 @@ +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_community/langchain_community.dart'; +import 'package:test/test.dart'; + +void main() { + group('TavilySearchResultsTool tests', 
() { + test('Calculate expressions', () async { + final tool = TavilySearchResultsTool( + apiKey: Platform.environment['TAVILY_API_KEY']!, + ); + final res = await tool.invoke('What is the weather like in New York?'); + expect(res.results, isNotEmpty); + final jsonString = res.toString(); + expect(() => json.decode(jsonString), returnsNormally); + tool.close(); + }); + }); + + group('TavilyAnswerTool tests', () { + test('Invoke TavilyAnswerTool', () async { + final tool = TavilyAnswerTool( + apiKey: Platform.environment['TAVILY_API_KEY']!, + ); + final res = await tool.invoke('What is the weather like in New York?'); + expect(res, isNotEmpty); + tool.close(); + }); + }); +} diff --git a/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart new file mode 100644 index 00000000..fdce5a1b --- /dev/null +++ b/packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart @@ -0,0 +1,190 @@ +import 'dart:io'; + +import 'package:langchain_community/langchain_community.dart'; +import 'package:langchain_core/documents.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 'package:objectbox/objectbox.dart'; +import 'package:test/test.dart'; + +void main() async { + late final OpenAIEmbeddings embeddings; + late final ObjectBoxVectorStore vectorStore; + + setUpAll(() async { + embeddings = OpenAIEmbeddings( + apiKey: Platform.environment['OPENAI_API_KEY'], + ); + vectorStore = ObjectBoxVectorStore( + embeddings: embeddings, + dimensions: 1536, + directory: 'test/vector_stores/objectbox', + ); + }); + + group('ObjectBoxVectorStore tests', () { + test('Test add new vectors', () async { + final res = await vectorStore.addDocuments( + documents: [ + const Document( + id: '1', + pageContent: 'The cat sat on the mat', + metadata: {'cat': 'animal'}, + ), + const Document( + id: '2', + pageContent: 'The dog chased the ball.', + metadata: {'cat': 'animal'}, + ), + const Document( + id: '3', + pageContent: 'The boy ate the apple.', + metadata: {'cat': 'person'}, + ), + const Document( + id: '4', + pageContent: 'The girl drank the milk.', + metadata: {'cat': 'person'}, + ), + const Document( + id: '5', + pageContent: 'The sun is shining.', + metadata: {'cat': 'natural'}, + ), + ], + ); + + expect(res.length, 5); + }); + + test('Test query return 1 result', () async { + final res = await vectorStore.similaritySearch( + query: 'Is it raining?', + config: ObjectBoxSimilaritySearch(k: 1), + ); + expect(res.length, 1); + expect( + res.first.id, + '5', + ); + }); + + test('Test query with scoreThreshold', () async { + final res = await vectorStore.similaritySearchWithScores( + query: 'Is it raining?', + config: ObjectBoxSimilaritySearch(scoreThreshold: 0.3), + ); + for (final (_, score) in res) { + expect(score, greaterThan(0.3)); + } + }); + + test('Test query with equality filter', () async { + final res = await vectorStore.similaritySearch( + query: 'What are they eating?', + config: ObjectBoxSimilaritySearch( + k: 10, + scoreThreshold: 1.3, + filterCondition: ObjectBoxDocumentProps.metadata.contains('person'), + ), + ); + for (final doc in res) { + expect(doc.metadata['cat'], 'person'); + } + }); + + test('Test query with filter with multiple operators', () async { + final res = await vectorStore.similaritySearch( + query: 'What are they eating?', + config: ObjectBoxSimilaritySearch( + k: 10, + filterCondition: ObjectBoxDocumentProps.metadata + .contains('animal') + 
.or(ObjectBoxDocumentProps.metadata.contains('natural')), + ), + ); + for (final doc in res) { + expect(doc.metadata['cat'], isNot('person')); + } + }); + + test('Test delete document', () async { + await vectorStore.addDocuments( + documents: [ + const Document( + id: '9999', + pageContent: 'This document will be deleted', + metadata: {'cat': 'xxx'}, + ), + ], + ); + final res1 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res1.length, 1); + expect(res1.first.id, '9999'); + + await vectorStore.delete(ids: ['9999']); + final res2 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res2.length, 0); + }); + + test('Test delete where', () async { + await vectorStore.addDocuments( + documents: [ + const Document( + id: '9999', + pageContent: 'This document will be deleted', + metadata: {'cat': 'xxx'}, + ), + ], + ); + final res1 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res1.length, 1); + expect(res1.first.id, '9999'); + + await vectorStore.deleteWhere( + ObjectBoxDocumentProps.metadata.contains('xxx'), + ); + final res2 = await vectorStore.similaritySearch( + query: 'Deleted doc', + config: ObjectBoxSimilaritySearch( + filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'), + ), + ); + expect(res2.length, 0); + }); + }); + + group('ObjectBoxSimilaritySearch', () { + test('ObjectBoxSimilaritySearch fields', () { + final config = ObjectBoxSimilaritySearch( + k: 5, + scoreThreshold: 0.8, + filterCondition: ObjectBoxDocumentProps.metadata.contains('style1'), + ); + expect(config.k, 5); + expect(config.scoreThreshold, 0.8); + expect(config.filter?['filter'], isA>()); + }); + }); + + tearDownAll(() async { + embeddings.close(); + vectorStore.close(); + await File('test/vector_stores/objectbox/data.mdb').delete(); + await File('test/vector_stores/objectbox/lock.mdb').delete(); + }); +} diff --git a/packages/langchain_core/CHANGELOG.md b/packages/langchain_core/CHANGELOG.md index 25cf9ffd..382d3dd3 100644 --- a/packages/langchain_core/CHANGELOG.md +++ b/packages/langchain_core/CHANGELOG.md @@ -1,3 +1,34 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.6 + + - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f)) + +## 0.3.5 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). 
([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + +## 0.3.4 + + - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + +## 0.3.3 + + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d)) + +## 0.3.2 + + - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18)) + - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70)) + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.3.1 - **FEAT**: Add equals to ChatToolChoiceForced ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_core/lib/agents.dart b/packages/langchain_core/lib/agents.dart index e99fdb9f..97382b62 100644 --- a/packages/langchain_core/lib/agents.dart +++ b/packages/langchain_core/lib/agents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to agents. -library agents; +library; export 'src/agents/agents.dart'; diff --git a/packages/langchain_core/lib/chains.dart b/packages/langchain_core/lib/chains.dart index 3214cef2..a35484cd 100644 --- a/packages/langchain_core/lib/chains.dart +++ b/packages/langchain_core/lib/chains.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chains. -library chains; +library; export 'src/chains/chains.dart'; diff --git a/packages/langchain_core/lib/chat_history.dart b/packages/langchain_core/lib/chat_history.dart index 316cbccc..726dbd3c 100644 --- a/packages/langchain_core/lib/chat_history.dart +++ b/packages/langchain_core/lib/chat_history.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat history. 
-library chat_history; +library; export 'src/chat_history/chat_history.dart'; diff --git a/packages/langchain_core/lib/chat_models.dart b/packages/langchain_core/lib/chat_models.dart index 803668df..259fa3c3 100644 --- a/packages/langchain_core/lib/chat_models.dart +++ b/packages/langchain_core/lib/chat_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to chat models. -library chat_models; +library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_core/lib/document_loaders.dart b/packages/langchain_core/lib/document_loaders.dart index 51fdbead..b8340c67 100644 --- a/packages/langchain_core/lib/document_loaders.dart +++ b/packages/langchain_core/lib/document_loaders.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to document loaders. -library document_loaders; +library; export 'src/document_loaders/document_loaders.dart'; diff --git a/packages/langchain_core/lib/documents.dart b/packages/langchain_core/lib/documents.dart index 24d340a4..a0f68ebd 100644 --- a/packages/langchain_core/lib/documents.dart +++ b/packages/langchain_core/lib/documents.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to documents. -library documents; +library; export 'src/documents/documents.dart'; diff --git a/packages/langchain_core/lib/embeddings.dart b/packages/langchain_core/lib/embeddings.dart index 829de2c7..b6c2bc82 100644 --- a/packages/langchain_core/lib/embeddings.dart +++ b/packages/langchain_core/lib/embeddings.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to embeddings. -library embeddings; +library; export 'src/embeddings/embeddings.dart'; diff --git a/packages/langchain_core/lib/exceptions.dart b/packages/langchain_core/lib/exceptions.dart index 4371a3a3..1e0d7fa0 100644 --- a/packages/langchain_core/lib/exceptions.dart +++ b/packages/langchain_core/lib/exceptions.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to exceptions. -library exceptions; +library; export 'src/exceptions/exceptions.dart'; diff --git a/packages/langchain_core/lib/langchain.dart b/packages/langchain_core/lib/langchain.dart index b30c4d14..cf5bb742 100644 --- a/packages/langchain_core/lib/langchain.dart +++ b/packages/langchain_core/lib/langchain.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LangChain. -library langchain; +library; export 'src/langchain/langchain.dart'; diff --git a/packages/langchain_core/lib/language_models.dart b/packages/langchain_core/lib/language_models.dart index 7cabafc7..1fae54b5 100644 --- a/packages/langchain_core/lib/language_models.dart +++ b/packages/langchain_core/lib/language_models.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to language models. -library language_models; +library; export 'src/language_models/language_models.dart'; diff --git a/packages/langchain_core/lib/llms.dart b/packages/langchain_core/lib/llms.dart index 5b98240d..ed130b60 100644 --- a/packages/langchain_core/lib/llms.dart +++ b/packages/langchain_core/lib/llms.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to LLMs. -library llms; +library; export 'src/llms/llms.dart'; diff --git a/packages/langchain_core/lib/memory.dart b/packages/langchain_core/lib/memory.dart index b79467cf..7193923f 100644 --- a/packages/langchain_core/lib/memory.dart +++ b/packages/langchain_core/lib/memory.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to memory. 
-library memory; +library; export 'src/memory/memory.dart'; diff --git a/packages/langchain_core/lib/output_parsers.dart b/packages/langchain_core/lib/output_parsers.dart index 7f0d0d5f..2915a146 100644 --- a/packages/langchain_core/lib/output_parsers.dart +++ b/packages/langchain_core/lib/output_parsers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to output parsers. -library output_parsers; +library; export 'src/output_parsers/output_parsers.dart'; diff --git a/packages/langchain_core/lib/prompts.dart b/packages/langchain_core/lib/prompts.dart index dbb7ef5b..b7873da5 100644 --- a/packages/langchain_core/lib/prompts.dart +++ b/packages/langchain_core/lib/prompts.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to prompts. -library prompts; +library; export 'src/prompts/prompts.dart'; diff --git a/packages/langchain_core/lib/retrievers.dart b/packages/langchain_core/lib/retrievers.dart index 5b1ec71e..5d1278bf 100644 --- a/packages/langchain_core/lib/retrievers.dart +++ b/packages/langchain_core/lib/retrievers.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to retrievers. -library retrievers; +library; export 'src/retrievers/retrievers.dart'; diff --git a/packages/langchain_core/lib/runnables.dart b/packages/langchain_core/lib/runnables.dart index e111eb58..72b67584 100644 --- a/packages/langchain_core/lib/runnables.dart +++ b/packages/langchain_core/lib/runnables.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to runnables. -library runnables; +library; export 'src/runnables/runnables.dart'; diff --git a/packages/langchain_core/lib/src/chains/types.dart b/packages/langchain_core/lib/src/chains/types.dart index f677381e..e76d876c 100644 --- a/packages/langchain_core/lib/src/chains/types.dart +++ b/packages/langchain_core/lib/src/chains/types.dart @@ -6,7 +6,7 @@ import '../langchain/types.dart'; typedef ChainValues = Map; /// {@template chain_options} -/// Options to pass to a chain. +/// Options to pass to the chain. /// {@endtemplate} @immutable class ChainOptions extends BaseLangChainOptions { diff --git a/packages/langchain_core/lib/src/chat_models/fake.dart b/packages/langchain_core/lib/src/chat_models/fake.dart index f465223d..bda1d6e3 100644 --- a/packages/langchain_core/lib/src/chat_models/fake.dart +++ b/packages/langchain_core/lib/src/chat_models/fake.dart @@ -1,5 +1,8 @@ +import 'package:collection/collection.dart'; + import '../../language_models.dart'; import '../prompts/types.dart'; +import '../tools/base.dart'; import 'base.dart'; import 'types.dart'; @@ -7,11 +10,12 @@ import 'types.dart'; /// Fake Chat Model for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeChatModel extends SimpleChatModel { +class FakeChatModel extends BaseChatModel { /// {@macro fake_list_llm} FakeChatModel({ required this.responses, - }) : super(defaultOptions: const ChatModelOptions()); + super.defaultOptions = const FakeChatModelOptions(), + }); /// Responses to return in order when called. final List responses; @@ -22,17 +26,28 @@ class FakeChatModel extends SimpleChatModel { String get modelType => 'fake-chat-model'; @override - Future callInternal( - final List messages, { - final ChatModelOptions? options, - }) { - return Future.value(responses[_i++ % responses.length]); + Future invoke( + final PromptValue input, { + final FakeChatModelOptions? 
options, + }) async { + final text = responses[_i++ % responses.length]; + final message = AIChatMessage(content: text); + return ChatResult( + id: '1', + output: message, + finishReason: FinishReason.unspecified, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, + usage: const LanguageModelUsage(), + ); } @override Stream stream( final PromptValue input, { - final ChatModelOptions? options, + final FakeChatModelOptions? options, }) { final res = responses[_i++ % responses.length].split(''); return Stream.fromIterable(res).map( @@ -40,7 +55,10 @@ class FakeChatModel extends SimpleChatModel { id: 'fake-chat-model', output: AIChatMessage(content: char), finishReason: FinishReason.stop, - metadata: const {}, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, usage: const LanguageModelUsage(), streaming: true, ), @@ -60,41 +78,133 @@ class FakeChatModel extends SimpleChatModel { } } -/// {@template fake_echo_llm} +/// {@template fake_chat_model_options} +/// Fake Chat Model Options for testing. +/// {@endtemplate} +class FakeChatModelOptions extends ChatModelOptions { + /// {@macro fake_chat_model_options} + const FakeChatModelOptions({ + super.model, + this.metadata, + super.tools, + super.toolChoice, + super.concurrencyLimit, + }); + + /// Metadata. + final Map? metadata; + + @override + FakeChatModelOptions copyWith({ + final String? model, + final Map? metadata, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }) { + return FakeChatModelOptions( + model: model ?? this.model, + metadata: metadata ?? this.metadata, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + FakeChatModelOptions merge( + covariant final FakeChatModelOptions? other, + ) { + return copyWith( + model: other?.model, + metadata: other?.metadata, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final FakeChatModelOptions other) { + return model == other.model && + const MapEquality().equals(metadata, other.metadata) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + const MapEquality().hash(metadata) ^ + concurrencyLimit.hashCode; + } +} + +/// {@template fake_echo_chat_model} /// Fake Chat Model for testing. /// It just returns the content of the last message of the prompt /// or streams it char by char. /// {@endtemplate} -class FakeEchoChatModel extends SimpleChatModel { - /// {@macro fake_echo_llm} - const FakeEchoChatModel() : super(defaultOptions: const ChatModelOptions()); +class FakeEchoChatModel extends BaseChatModel { + /// {@macro fake_echo_chat_model} + const FakeEchoChatModel({ + super.defaultOptions = const FakeEchoChatModelOptions(), + }); @override String get modelType => 'fake-echo-chat-model'; @override - Future callInternal( - final List messages, { - final ChatModelOptions? options, - }) { - return Future.value(messages.last.contentAsString); + Future invoke( + final PromptValue input, { + final FakeEchoChatModelOptions? options, + }) async { + final throwError = + options?.throwRandomError ?? 
defaultOptions.throwRandomError; + if (throwError) { + throw Exception('Random error'); + } + + final text = input.toChatMessages().last.contentAsString; + final message = AIChatMessage(content: text); + return ChatResult( + id: '1', + output: message, + finishReason: FinishReason.unspecified, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + }, + usage: const LanguageModelUsage(), + ); } @override Stream stream( final PromptValue input, { - final ChatModelOptions? options, + final FakeEchoChatModelOptions? options, }) { final prompt = input.toChatMessages().first.contentAsString.split(''); + final throwError = + options?.throwRandomError ?? defaultOptions.throwRandomError; + + var index = 0; return Stream.fromIterable(prompt).map( - (final char) => ChatResult( - id: 'fake-echo-chat-model', - output: AIChatMessage(content: char), - finishReason: FinishReason.stop, - metadata: const {}, - usage: const LanguageModelUsage(), - streaming: true, - ), + (final char) { + if (throwError && index == prompt.length ~/ 2) { + throw Exception('Random error'); + } + + return ChatResult( + id: 'fake-echo-chat-model', + output: AIChatMessage(content: char), + finishReason: FinishReason.stop, + metadata: { + 'model': options?.model ?? defaultOptions.model, + ...?options?.metadata ?? defaultOptions.metadata, + 'index': index++, + }, + usage: const LanguageModelUsage(), + streaming: true, + ); + }, ); } @@ -110,3 +220,71 @@ class FakeEchoChatModel extends SimpleChatModel { .toList(growable: false); } } + +/// {@template fake_chat_model_options} +/// Fake Echo Chat Model Options for testing. +/// {@endtemplate} +class FakeEchoChatModelOptions extends ChatModelOptions { + /// {@macro fake_chat_model_options} + const FakeEchoChatModelOptions({ + super.model, + this.metadata, + this.throwRandomError = false, + super.tools, + super.toolChoice, + super.concurrencyLimit, + }); + + /// Metadata. + final Map? metadata; + + /// If true, throws a random error. + final bool throwRandomError; + + @override + FakeEchoChatModelOptions copyWith({ + final String? model, + final Map? metadata, + final bool? throwRandomError, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }) { + return FakeEchoChatModelOptions( + model: model ?? this.model, + metadata: metadata ?? this.metadata, + throwRandomError: throwRandomError ?? this.throwRandomError, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + FakeEchoChatModelOptions merge( + covariant final FakeEchoChatModelOptions? 
other, + ) { + return copyWith( + model: other?.model, + metadata: other?.metadata, + throwRandomError: other?.throwRandomError, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final FakeEchoChatModelOptions other) { + return model == other.model && + const MapEquality().equals(metadata, other.metadata) && + throwRandomError == other.throwRandomError && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + const MapEquality().hash(metadata) ^ + throwRandomError.hashCode ^ + concurrencyLimit.hashCode; + } +} diff --git a/packages/langchain_core/lib/src/chat_models/types.dart b/packages/langchain_core/lib/src/chat_models/types.dart index aa072b7e..f92fe4af 100644 --- a/packages/langchain_core/lib/src/chat_models/types.dart +++ b/packages/langchain_core/lib/src/chat_models/types.dart @@ -7,12 +7,14 @@ import '../tools/base.dart'; /// {@template chat_model_options} /// Generation options to pass into the Chat Model. /// {@endtemplate} -class ChatModelOptions extends LanguageModelOptions { +@immutable +abstract class ChatModelOptions extends LanguageModelOptions { /// {@macro chat_model_options} const ChatModelOptions({ - super.concurrencyLimit, + super.model, this.tools, this.toolChoice, + super.concurrencyLimit, }); /// A list of tools the model may call. @@ -20,6 +22,14 @@ class ChatModelOptions extends LanguageModelOptions { /// Controls which (if any) tool is called by the model. final ChatToolChoice? toolChoice; + + @override + ChatModelOptions copyWith({ + final String? model, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, + }); } /// {@template chat_result} @@ -44,7 +54,7 @@ class ChatResult extends LanguageModelResult { final LanguageModelResult other, ) { return ChatResult( - id: other.id, + id: other.id.isNotEmpty ? other.id : id, output: output.concat(other.output), finishReason: finishReason != FinishReason.unspecified && other.finishReason == FinishReason.unspecified @@ -147,6 +157,9 @@ sealed class ChatMessage { /// Merges this message with another by concatenating the content. 
ChatMessage concat(final ChatMessage other); + + ///Converts ChatMessage to json string + Map toJson(); } /// {@template system_chat_message} @@ -187,6 +200,15 @@ SystemChatMessage{ content: $content, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'content': content, + }; + + factory SystemChatMessage.fromJson(final Map json) => + SystemChatMessage(content: json['content']); } /// {@template human_chat_message} @@ -279,6 +301,18 @@ HumanChatMessage{ content: $content, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'content': content.toJson(), + }; + + factory HumanChatMessage.fromJson(final Map json) { + return HumanChatMessage( + content: ChatMessageContentText.fromJson(json['content']), + ); + } } /// {@template ai_chat_message} @@ -366,6 +400,23 @@ AIChatMessage{ toolCalls: $toolCalls, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'content': content, + 'toolCalls': toolCalls.map((toolCall) => toolCall.toJson()).toList(), + }; + + /// Will be used to get an [AIChatMessage] from a json format + factory AIChatMessage.fromJson(final Map json) { + return AIChatMessage( + content: json['content'], + toolCalls: (json['toolCalls'] as List) + .map((toolCallJson) => AIChatMessageToolCall.fromJson(toolCallJson)) + .toList(), + ); + } } /// {@template ai_chat_message_tool_call} @@ -382,6 +433,8 @@ class AIChatMessageToolCall { }); /// The id of the tool to call. + /// + /// This is used to match up the tool results later. final String id; /// The name of the tool to call. @@ -434,6 +487,24 @@ AIChatMessageToolCall{ arguments: $arguments, }'''; } + + /// The toJson function will convert the [AIChatMessageToolCall] to a json object + Map toJson() => { + 'id': id, + 'name': name, + 'argumentsRaw': argumentsRaw, + 'arguments': arguments, + }; + + /// Will be used to get an AIChatMessageToolCall from a json format + factory AIChatMessageToolCall.fromJson(final Map json) { + return AIChatMessageToolCall( + id: json['id'], + name: json['name'], + argumentsRaw: json['argumentsRaw'], + arguments: json['arguments'], + ); + } } /// {@template tool_chat_message} @@ -484,6 +555,17 @@ ToolChatMessage{ content: $content, }'''; } + + @override + Map toJson() => { + 'type': defaultPrefix, + 'toolCallId': toolCallId, + 'content': content, + }; + + /// Will be used to get an [ToolChatMessage] from a json format + factory ToolChatMessage.fromJson(final Map json) => + ToolChatMessage(content: json['content'], toolCallId: json['toolCallId']); } /// {@template custom_chat_message} @@ -529,6 +611,17 @@ CustomChatMessage{ role: $role, }'''; } + + @override + Map toJson() => { + 'type': 'Custom', + 'content': content, + 'role': role, + }; + + /// Will be used to get an [ToolChatMessage] from a json format + factory CustomChatMessage.fromJson(final Map json) => + CustomChatMessage(content: json['content'], role: json['role']); } /// Role of a chat message @@ -580,6 +673,9 @@ sealed class ChatMessageContent { final List parts, ) => ChatMessageContentMultiModal(parts: parts); + + /// Converts the class to a json object + Map toJson(); } /// {@template chat_message_content_text} @@ -608,6 +704,15 @@ ChatMessageContentText{ text: $text, }'''; } + + @override + Map toJson() => { + 'text': text, + }; + + /// Takes a json object and returns a new ChatMessageContentText + factory ChatMessageContentText.fromJson(final Map json) => + ChatMessageContentText(text: json['text']); } /// {@template chat_message_content_image} @@ -623,7 +728,7 @@ class 
ChatMessageContentImage extends ChatMessageContent { /// Depending on the model, this can be either: /// - The base64 encoded image data - /// - A URL of the image. + /// - A URL of the image (only supported by some providers) final String data; /// The IANA standard MIME type of the source data. @@ -657,6 +762,13 @@ ChatMessageContentImage{ imageDetail: $detail, }'''; } + + @override + Map toJson() => { + 'data': data, + 'mimeType': mimeType, + 'imageDetail': detail, + }; } /// {@template chat_message_content_multi_modal} @@ -689,6 +801,11 @@ ChatMessageContentMultiModal{ parts: $parts, }'''; } + + @override + Map toJson() => { + 'parts': parts.map((part) => part.toJson()).toList(), + }; } /// Specifies the detail level of the image. @@ -713,9 +830,12 @@ sealed class ChatToolChoice { /// The model does not call a tool, and responds to the end-user. static const none = ChatToolChoiceNone(); - /// The model can pick between an end-user or calling a tool. + /// The model can pick between responding to the end-user or calling a tool. static const auto = ChatToolChoiceAuto(); + /// The model must call at least one tool, but doesn’t force a particular tool. + static const required = ChatToolChoiceRequired(); + /// The model is forced to to call the specified tool. factory ChatToolChoice.forced({required final String name}) => ChatToolChoiceForced(name: name); @@ -730,13 +850,21 @@ final class ChatToolChoiceNone extends ChatToolChoice { } /// {@template chat_tool_choice_auto} -/// The model can pick between an end-user or calling a tool. +/// The model can pick between responding to the end-user or calling a tool. /// {@endtemplate} final class ChatToolChoiceAuto extends ChatToolChoice { /// {@macro chat_tool_choice_auto} const ChatToolChoiceAuto(); } +/// {@template chat_tool_choice_required} +/// The model must call at least one tool, but doesn’t force a particular tool. +/// {@endtemplate} +final class ChatToolChoiceRequired extends ChatToolChoice { + /// {@macro chat_tool_choice_none} + const ChatToolChoiceRequired(); +} + /// {@template chat_tool_choice_forced} /// The model is forced to to call the specified tool. /// {@endtemplate} diff --git a/packages/langchain_core/lib/src/chat_models/utils.dart b/packages/langchain_core/lib/src/chat_models/utils.dart index 5c84a142..ebfc011c 100644 --- a/packages/langchain_core/lib/src/chat_models/utils.dart +++ b/packages/langchain_core/lib/src/chat_models/utils.dart @@ -1,6 +1,6 @@ import 'types.dart'; -/// Extensions on [List]. +/// Extensions on `List`. extension ChatMessagesX on List { /// This function is to get a string representation of the chat messages /// based on the message content and role. diff --git a/packages/langchain_core/lib/src/langchain/types.dart b/packages/langchain_core/lib/src/langchain/types.dart index 8dabca52..091429d6 100644 --- a/packages/langchain_core/lib/src/langchain/types.dart +++ b/packages/langchain_core/lib/src/langchain/types.dart @@ -3,7 +3,7 @@ import 'package:meta/meta.dart'; import '../runnables/types.dart'; /// {@template base_lang_chain_options} -/// Base class for LangChain components' options. +/// Base options class for LangChain components. 
/// {@endtemplate} @immutable class BaseLangChainOptions extends RunnableOptions { diff --git a/packages/langchain_core/lib/src/language_models/base.dart b/packages/langchain_core/lib/src/language_models/base.dart index 33d3b002..3156cd74 100644 --- a/packages/langchain_core/lib/src/language_models/base.dart +++ b/packages/langchain_core/lib/src/language_models/base.dart @@ -1,5 +1,3 @@ -import 'package:meta/meta.dart'; - import '../langchain/base.dart'; import '../prompts/types.dart'; import 'types.dart'; @@ -58,33 +56,4 @@ abstract class BaseLanguageModel< @override String toString() => modelType; - - /// Throws an error if the model id is not specified. - @protected - Never throwNullModelError() { - throw ArgumentError(''' -Null model in $runtimeType. - -You need to specify the id of model to use either in `$runtimeType.defaultOptions` -or in the options passed when invoking the model. - -Example: -``` -// In defaultOptions -final model = $runtimeType( - defaultOptions: ${runtimeType}Options( - model: 'model-id', - ), -); - -// Or when invoking the model -final res = await model.invoke( - prompt, - options: ${runtimeType}Options( - model: 'model-id', - ), -); -``` -'''); - } } diff --git a/packages/langchain_core/lib/src/language_models/types.dart b/packages/langchain_core/lib/src/language_models/types.dart index 8112ab37..39e071bd 100644 --- a/packages/langchain_core/lib/src/language_models/types.dart +++ b/packages/langchain_core/lib/src/language_models/types.dart @@ -4,14 +4,25 @@ import 'package:meta/meta.dart'; import '../langchain/types.dart'; /// {@template language_model_options} -/// Generation options to pass into the language model. +/// Options to pass into the language model. /// {@endtemplate} @immutable abstract class LanguageModelOptions extends BaseLangChainOptions { /// {@macro language_model_options} const LanguageModelOptions({ + this.model, super.concurrencyLimit, }); + + /// ID of the language model to use. + /// Check the provider's documentation for available models. + final String? model; + + @override + LanguageModelOptions copyWith({ + final String? model, + final int? concurrencyLimit, + }); } /// {@template language_model} @@ -99,12 +110,16 @@ class LanguageModelUsage { }); /// The number of tokens in the prompt. + /// + /// Some providers call this "input_tokens". final int? promptTokens; /// The total number of billable characters in the prompt if applicable. final int? promptBillableCharacters; /// The number of tokens in the completion. + /// + /// Some providers call this "output_tokens". final int? responseTokens; /// The total number of billable characters in the completion if applicable. @@ -172,9 +187,13 @@ LanguageModelUsage{ /// The reason the model stopped generating tokens. enum FinishReason { /// The model hit a natural stop point or a provided stop sequence. + /// + /// Some providers call this "end_turn". stop, /// The maximum number of tokens specified in the request was reached. + /// + /// Some providers call this "max_tokens". length, /// The content was flagged for content filter reasons. @@ -184,6 +203,8 @@ enum FinishReason { recitation, /// The model called a tool. + /// + /// Some providers call this "tool_use". toolCalls, /// The finish reason is unspecified. 
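The reworked fake chat models and the new `toJson`/`fromJson` helpers on the chat message types can be exercised together. Below is a minimal sketch, not part of the diff: it assumes the `PromptValue.chat` factory and the public exports of `package:langchain_core/chat_models.dart` and `package:langchain_core/prompts.dart`; model names and metadata values are illustrative only.

```dart
import 'dart:convert';

import 'package:langchain_core/chat_models.dart';
import 'package:langchain_core/prompts.dart';

Future<void> main() async {
  // Echo model configured through the new FakeEchoChatModelOptions.
  const model = FakeEchoChatModel(
    defaultOptions: FakeEchoChatModelOptions(
      model: 'fake-echo',
      metadata: {'env': 'test'},
    ),
  );

  // The echo model returns the content of the last message in the prompt.
  final result = await model.invoke(
    PromptValue.chat([
      HumanChatMessage(
        content: ChatMessageContentText(text: 'What should I feed my cat?'),
      ),
    ]),
  );
  print(result.output.content); // What should I feed my cat?
  print(result.metadata['model']); // fake-echo

  // Round-trip the AI message through the new toJson/fromJson helpers.
  final json = result.output.toJson();
  final restored = AIChatMessage.fromJson(json);
  print(jsonEncode(json));
  print(restored.content == result.output.content); // true
}
```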
diff --git a/packages/langchain_core/lib/src/llms/fake.dart b/packages/langchain_core/lib/src/llms/fake.dart index 0781e607..ffb64c00 100644 --- a/packages/langchain_core/lib/src/llms/fake.dart +++ b/packages/langchain_core/lib/src/llms/fake.dart @@ -7,11 +7,11 @@ import 'types.dart'; /// Fake LLM for testing. /// You can pass in a list of responses to return in order when called. /// {@endtemplate} -class FakeLLM extends SimpleLLM { +class FakeLLM extends SimpleLLM { /// {@macro fake_list_llm} FakeLLM({ required this.responses, - }) : super(defaultOptions: const LLMOptions()); + }) : super(defaultOptions: const FakeLLMOptions()); /// Responses to return in order when called. final List responses; @@ -60,13 +60,35 @@ class FakeLLM extends SimpleLLM { } } +/// {@template fake_llm_options} +/// Fake LLM options for testing. +/// {@endtemplate} +class FakeLLMOptions extends LLMOptions { + /// {@macro fake_llm_options} + const FakeLLMOptions({ + super.model, + super.concurrencyLimit, + }); + + @override + FakeLLMOptions copyWith({ + final String? model, + final int? concurrencyLimit, + }) { + return FakeLLMOptions( + model: model ?? this.model, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } +} + /// {@template fake_echo_llm} /// Fake LLM for testing. /// It just returns the prompt or streams it char by char. /// {@endtemplate} -class FakeEchoLLM extends BaseLLM { +class FakeEchoLLM extends BaseLLM { /// {@macro fake_echo_llm} - const FakeEchoLLM() : super(defaultOptions: const LLMOptions()); + const FakeEchoLLM() : super(defaultOptions: const FakeLLMOptions()); @override String get modelType => 'fake-echo'; @@ -122,11 +144,11 @@ class FakeEchoLLM extends BaseLLM { /// Fake LLM for testing. /// It returns the string returned by the [handler] function. /// {@endtemplate} -class FakeHandlerLLM extends SimpleLLM { +class FakeHandlerLLM extends SimpleLLM { /// {@macro fake_handler_llm} FakeHandlerLLM({ required this.handler, - }) : super(defaultOptions: const LLMOptions()); + }) : super(defaultOptions: const FakeLLMOptions()); /// Function called to generate the response. final String Function( diff --git a/packages/langchain_core/lib/src/llms/types.dart b/packages/langchain_core/lib/src/llms/types.dart index d6bed6f3..7a81a0ab 100644 --- a/packages/langchain_core/lib/src/llms/types.dart +++ b/packages/langchain_core/lib/src/llms/types.dart @@ -3,12 +3,13 @@ import 'package:meta/meta.dart'; import '../language_models/types.dart'; /// {@template llm_options} -/// Generation options to pass into the LLM. +/// Options to pass into the LLM. 
/// {@endtemplate} @immutable -class LLMOptions extends LanguageModelOptions { +abstract class LLMOptions extends LanguageModelOptions { /// {@macro llm_options} const LLMOptions({ + super.model, super.concurrencyLimit, }); } diff --git a/packages/langchain_core/lib/src/output_parsers/string.dart b/packages/langchain_core/lib/src/output_parsers/string.dart index f5ea11a8..9dd4722a 100644 --- a/packages/langchain_core/lib/src/output_parsers/string.dart +++ b/packages/langchain_core/lib/src/output_parsers/string.dart @@ -68,9 +68,7 @@ class StringOutputParser if (reduceOutputStream) { yield await inputStream.map(_parse).reduce((final a, final b) => '$a$b'); } else { - await for (final input in inputStream) { - yield _parse(input); - } + yield* inputStream.map(_parse); } } diff --git a/packages/langchain_core/lib/src/output_parsers/types.dart b/packages/langchain_core/lib/src/output_parsers/types.dart index 460840fa..9e8906b7 100644 --- a/packages/langchain_core/lib/src/output_parsers/types.dart +++ b/packages/langchain_core/lib/src/output_parsers/types.dart @@ -60,7 +60,9 @@ class ParsedToolCall { } @override - int get hashCode => id.hashCode ^ name.hashCode ^ arguments.hashCode; + int get hashCode { + return id.hashCode ^ name.hashCode ^ arguments.hashCode; + } @override String toString() { diff --git a/packages/langchain_core/lib/src/prompts/types.dart b/packages/langchain_core/lib/src/prompts/types.dart index c2a9474b..3bd9756b 100644 --- a/packages/langchain_core/lib/src/prompts/types.dart +++ b/packages/langchain_core/lib/src/prompts/types.dart @@ -90,7 +90,7 @@ class StringPromptValue implements PromptValue { /// /// When [toString] is called, it returns the string representation of the /// messages using the following format: -/// ``` +/// ```txt /// System: /// Human: /// AI: @@ -145,7 +145,7 @@ class ChatPromptValue implements PromptValue { return message.concat(otherMessage); } }) - .whereNotNull() + .nonNulls .toList(growable: false), ), }; diff --git a/packages/langchain_core/lib/src/retrievers/types.dart b/packages/langchain_core/lib/src/retrievers/types.dart index 4ed82147..e3938296 100644 --- a/packages/langchain_core/lib/src/retrievers/types.dart +++ b/packages/langchain_core/lib/src/retrievers/types.dart @@ -9,7 +9,9 @@ import '../vector_stores/types.dart'; @immutable class RetrieverOptions extends BaseLangChainOptions { /// {@macro retriever_options} - const RetrieverOptions(); + const RetrieverOptions({ + super.concurrencyLimit, + }); } /// {@template vector_store_retriever_options} @@ -19,10 +21,22 @@ class VectorStoreRetrieverOptions extends RetrieverOptions { /// {@macro vector_store_retriever_options} const VectorStoreRetrieverOptions({ this.searchType = const VectorStoreSimilaritySearch(), + super.concurrencyLimit, }); /// The type of search to perform, either: /// - [VectorStoreSearchType.similarity] (default) /// - [VectorStoreSearchType.mmr] final VectorStoreSearchType searchType; + + @override + VectorStoreRetrieverOptions copyWith({ + final VectorStoreSearchType? searchType, + final int? concurrencyLimit, + }) { + return VectorStoreRetrieverOptions( + searchType: searchType ?? this.searchType, + concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit, + ); + } } diff --git a/packages/langchain_core/lib/src/runnables/binding.dart b/packages/langchain_core/lib/src/runnables/binding.dart index 75e6084f..1bd1bee4 100644 --- a/packages/langchain_core/lib/src/runnables/binding.dart +++ b/packages/langchain_core/lib/src/runnables/binding.dart @@ -60,7 +60,9 @@ class RunnableBinding + extends Runnable { + /// {@macro runnable_fallback} + RunnableWithFallback({ + required this.mainRunnable, + required this.fallbacks, + }) : super(defaultOptions: const RunnableOptions()); + + /// The Runnable to run first. + final Runnable mainRunnable; + + /// A sequence of fallbacks to try if the [mainRunnable] fails. + final List> fallbacks; + + @override + Future invoke(RunInput input, {RunnableOptions? options}) async { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + try { + return await runnable.invoke( + input, + options: firstError == null + ? options + : runnable.getCompatibleOptions(options), + ); + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } + + @override + Future> batch( + List inputs, { + List? options, + }) async { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + List? currentOptions; + if (firstError == null) { + currentOptions = options; + } else { + final compatibleOptions = + options?.map(runnable.getCompatibleOptions).toList(growable: false); + final hasNullOptions = + compatibleOptions?.any((o) => o == null) ?? false; + if (!hasNullOptions) { + currentOptions = compatibleOptions?.cast(); + } + } + + try { + return await runnable.batch( + inputs, + options: currentOptions, + ); + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } + + @override + Stream stream( + RunInput input, { + RunnableOptions? options, + }) async* { + Object? firstError; + for (final runnable in [mainRunnable, ...fallbacks]) { + try { + final stream = runnable.stream( + input, + options: firstError == null + ? options + : runnable.getCompatibleOptions(options), + ); + await for (final output in stream) { + yield output; + } + return; + } catch (e) { + firstError ??= e; + } + } + throw Exception('All runnables failed. First error: $firstError'); + } +} diff --git a/packages/langchain_core/lib/src/runnables/function.dart b/packages/langchain_core/lib/src/runnables/function.dart index 32843641..7af32bcc 100644 --- a/packages/langchain_core/lib/src/runnables/function.dart +++ b/packages/langchain_core/lib/src/runnables/function.dart @@ -89,7 +89,7 @@ class RunnableFunction final RunnableOptions? options, }) async { if (_invokeFunc != null) { - return _invokeFunc!(input, options); + return _invokeFunc(input, options); } else { return stream(input, options: options).first; } @@ -113,7 +113,7 @@ class RunnableFunction final RunnableOptions? 
options, }) async* { if (_streamFunc != null) { - yield* _streamFunc!(inputStream, options); + yield* _streamFunc(inputStream, options); } else { yield* inputStream.asyncMap((final input) async { return invoke(input, options: options); diff --git a/packages/langchain_core/lib/src/runnables/map.dart b/packages/langchain_core/lib/src/runnables/map.dart index f9029da9..0b3cb925 100644 --- a/packages/langchain_core/lib/src/runnables/map.dart +++ b/packages/langchain_core/lib/src/runnables/map.dart @@ -108,4 +108,11 @@ class RunnableMap }), ).asBroadcastStream(); } + + @override + void close() { + for (final step in steps.values) { + step.close(); + } + } } diff --git a/packages/langchain_core/lib/src/runnables/retry.dart b/packages/langchain_core/lib/src/runnables/retry.dart new file mode 100644 index 00000000..e49c4d22 --- /dev/null +++ b/packages/langchain_core/lib/src/runnables/retry.dart @@ -0,0 +1,63 @@ +import 'dart:async'; +import '../utils/retry_client.dart'; +import 'runnables.dart'; + +/// {@template runnable_retry} +/// A [Runnable] that automatically retries the operation if it fails. +/// +/// You can create a [RunnableRetry] using [Runnable.withRetry], passing in the +/// [RetryOptions]. +/// +/// When [invoke] or [batch] is called on the runnable, if the initial attempt +/// fails, it will be retried according to the specified [RetryOptions]. +/// +/// Example usage: +/// ```dart +/// final model = ChatOpenAI(...); +/// final modelWithRetry = model.withRetry(maxRetries: 2); +/// final res = await modelWithRetry.invoke(...); +/// ``` +/// {@endtemplate} +class RunnableRetry + extends Runnable { + /// {@macro runnable_retry} + RunnableRetry({ + required this.runnable, + required super.defaultOptions, + required this.retryOptions, + }); + + /// Runnable that will be retried on error. + final Runnable runnable; + + /// Options to retry the runnable. + final RetryOptions retryOptions; + + @override + Future invoke( + RunInput input, { + RunnableOptions? options, + }) async { + return retryClient( + options: retryOptions, + fn: () => runnable.invoke( + input, + options: options, + ), + ); + } + + @override + Future> batch( + List inputs, { + List? options, + }) async { + return retryClient( + options: retryOptions, + fn: () => runnable.batch( + inputs, + options: options, + ), + ); + } +} diff --git a/packages/langchain_core/lib/src/runnables/runnable.dart b/packages/langchain_core/lib/src/runnables/runnable.dart index 792bc80a..05f828ca 100644 --- a/packages/langchain_core/lib/src/runnables/runnable.dart +++ b/packages/langchain_core/lib/src/runnables/runnable.dart @@ -2,11 +2,13 @@ import 'dart:async'; import '../../utils.dart'; import 'binding.dart'; +import 'fallbacks.dart'; import 'function.dart'; import 'input_map.dart'; import 'input_stream_map.dart'; import 'map.dart'; import 'passthrough.dart'; +import 'retry.dart'; import 'router.dart'; import 'sequence.dart'; import 'types.dart'; @@ -282,6 +284,57 @@ abstract class Runnable withFallbacks( + List> fallbacks, + ) { + return RunnableWithFallback( + mainRunnable: this, + fallbacks: fallbacks, + ); + } + + /// Adds retry logic to an existing runnable. + /// + /// This method create a [RunnableRetry] instance, if the current [Runnable] + /// throws an exception during invocation, it will be retried based on the + /// configuration provided. By default the runnable will be retried 3 times + /// with exponential delay between each retry. + /// + /// - [maxRetries] - max attempts to retry the runnable. 
+ /// - [retryIf] - evaluator function to check whether to retry based the + /// exception thrown. + /// - [delayDurations] - by default runnable will be retried based on an + /// exponential backoff strategy with base delay as 1 second. But you can + /// override this behavior by providing an optional list of [Duration]s. + /// - [addJitter] - whether to add jitter to the delay. + RunnableRetry withRetry({ + final int maxRetries = 3, + final FutureOr Function(Object e)? retryIf, + final List? delayDurations, + final bool addJitter = false, + }) { + return RunnableRetry( + runnable: this, + defaultOptions: defaultOptions, + retryOptions: RetryOptions( + maxRetries: maxRetries, + retryIf: retryIf, + delayDurations: delayDurations, + addJitter: addJitter, + ), + ); + } + /// Returns the given [options] if they are compatible with the [Runnable], /// otherwise returns `null`. CallOptions? getCompatibleOptions( @@ -289,4 +342,14 @@ abstract class Runnable Stream streamFromInputStream( final Stream inputStream, { final RunnableOptions? options, - }) { + }) async* { Stream nextStepStream; try { nextStepStream = first.streamFromInputStream( @@ -152,7 +152,7 @@ class RunnableSequence } try { - return last.streamFromInputStream( + yield* last.streamFromInputStream( nextStepStream, options: last.getCompatibleOptions(options), ); @@ -205,4 +205,11 @@ Please ensure that the output of the previous runnable in the sequence matches t '''; throw ArgumentError(errorMessage); } + + @override + void close() { + for (final step in steps) { + step.close(); + } + } } diff --git a/packages/langchain_core/lib/src/runnables/types.dart b/packages/langchain_core/lib/src/runnables/types.dart index 0a70a4d3..efec915e 100644 --- a/packages/langchain_core/lib/src/runnables/types.dart +++ b/packages/langchain_core/lib/src/runnables/types.dart @@ -13,4 +13,31 @@ class RunnableOptions { /// The maximum number of concurrent calls that the runnable can make. /// Defaults to 1000 (different Runnable types may have different defaults). final int concurrencyLimit; + + /// Creates a copy of this [RunnableOptions] with the given fields replaced + /// by the new values. + RunnableOptions copyWith({ + int? concurrencyLimit, + }) { + return RunnableOptions( + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + /// Merges this [RunnableOptions] with another [RunnableOptions]. + RunnableOptions merge(RunnableOptions? other) { + return copyWith( + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final RunnableOptions other) { + return concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_core/lib/src/tools/base.dart b/packages/langchain_core/lib/src/tools/base.dart index e676fc6b..079dbab7 100644 --- a/packages/langchain_core/lib/src/tools/base.dart +++ b/packages/langchain_core/lib/src/tools/base.dart @@ -12,13 +12,13 @@ import 'types.dart'; /// {@template tool_spec} /// The specification of a LangChain tool without the actual implementation. /// {@endtemplate} -@immutable class ToolSpec { /// {@macro tool_spec} const ToolSpec({ required this.name, required this.description, required this.inputJsonSchema, + this.strict = false, }); /// The unique name of the tool that clearly communicates its purpose. @@ -51,18 +51,31 @@ class ToolSpec { /// ``` final Map inputJsonSchema; + /// Whether to enable strict schema adherence when generating the tool call. 
+ /// If set to true, the model will follow the exact schema defined in the + /// [inputJsonSchema] field. + /// + /// This is only supported by some providers (e.g. OpenAI). Mind that when + /// enabled, only a subset of JSON Schema may be supported. Check out the + /// provider's tool calling documentation for more information. + final bool strict; + @override bool operator ==(covariant final ToolSpec other) { final mapEquals = const DeepCollectionEquality().equals; return identical(this, other) || name == other.name && description == other.description && - mapEquals(inputJsonSchema, other.inputJsonSchema); + mapEquals(inputJsonSchema, other.inputJsonSchema) && + strict == other.strict; } @override int get hashCode => - name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + name.hashCode ^ + description.hashCode ^ + inputJsonSchema.hashCode ^ + strict.hashCode; @override String toString() { @@ -71,9 +84,20 @@ ToolSpec{ name: $name, description: $description, inputJsonSchema: $inputJsonSchema, + strict: $strict, } '''; } + + /// Converts the tool spec to a JSON-serializable map. + Map toJson() { + return { + 'name': name, + 'description': description, + 'inputJsonSchema': inputJsonSchema, + 'strict': strict, + }; + } } /// {@template tool} @@ -94,6 +118,7 @@ abstract base class Tool inputJsonSchema; + @override + final bool strict; + /// Whether to return the tool's output directly. /// Setting this to true means that after the tool is called, /// the AgentExecutor will stop looping. @@ -124,7 +152,9 @@ abstract base class Tool inputJsonSchema, + final bool strict = false, required final FutureOr Function(Input input) func, Input Function(Map json)? getInputFromJson, final bool returnDirect = false, @@ -149,6 +180,7 @@ abstract base class Tool json['input'] as Input, returnDirect: returnDirect, @@ -209,12 +241,26 @@ abstract base class Tool - name.hashCode ^ description.hashCode ^ inputJsonSchema.hashCode; + name.hashCode ^ + description.hashCode ^ + inputJsonSchema.hashCode ^ + strict.hashCode; + + @override + Map toJson() { + return { + 'name': name, + 'description': description, + 'inputJsonSchema': inputJsonSchema, + 'strict': strict, + }; + } } /// {@template tool_func} @@ -228,6 +274,7 @@ final class _ToolFunc required super.name, required super.description, required super.inputJsonSchema, + required super.strict, required FutureOr Function(Input input) function, required Input Function(Map json) getInputFromJson, super.returnDirect = false, diff --git a/packages/langchain_core/lib/src/tools/string.dart b/packages/langchain_core/lib/src/tools/string.dart index 3c9973d5..43e1e2e7 100644 --- a/packages/langchain_core/lib/src/tools/string.dart +++ b/packages/langchain_core/lib/src/tools/string.dart @@ -14,6 +14,7 @@ abstract base class StringTool required super.name, required super.description, final String inputDescription = 'The input to the tool', + super.strict = false, super.returnDirect = false, super.handleToolError, super.defaultOptions, @@ -36,6 +37,8 @@ abstract base class StringTool /// purpose. /// - [description] is used to tell the model how/when/why to use the tool. /// You can provide few-shot examples as a part of the description. + /// - [strict] whether to enable strict schema adherence when generating the + /// tool call (only supported by some providers). /// - [func] is the function that will be called when the tool is run. /// - [returnDirect] whether to return the tool's output directly. 
/// Setting this to true means that after the tool is called, @@ -46,6 +49,7 @@ abstract base class StringTool required final String name, required final String description, final String inputDescription = 'The input to the tool', + final bool strict = false, required final FutureOr Function(String input) func, final bool returnDirect = false, final String Function(ToolException)? handleToolError, @@ -54,6 +58,7 @@ abstract base class StringTool name: name, description: description, inputDescription: inputDescription, + strict: strict, func: func, returnDirect: returnDirect, handleToolError: handleToolError, @@ -84,6 +89,7 @@ final class _StringToolFunc required super.name, required super.description, super.inputDescription, + required super.strict, required FutureOr Function(String) func, super.returnDirect = false, super.handleToolError, diff --git a/packages/langchain_core/lib/src/tools/types.dart b/packages/langchain_core/lib/src/tools/types.dart index e533b480..a51b3b49 100644 --- a/packages/langchain_core/lib/src/tools/types.dart +++ b/packages/langchain_core/lib/src/tools/types.dart @@ -6,7 +6,9 @@ import '../langchain/types.dart'; /// {@endtemplate} class ToolOptions extends BaseLangChainOptions { /// {@macro tool_options} - const ToolOptions(); + const ToolOptions({ + super.concurrencyLimit, + }); } /// {@template tool_exception} diff --git a/packages/langchain_core/lib/src/utils/retry_client.dart b/packages/langchain_core/lib/src/utils/retry_client.dart new file mode 100644 index 00000000..9cd15317 --- /dev/null +++ b/packages/langchain_core/lib/src/utils/retry_client.dart @@ -0,0 +1,92 @@ +import 'dart:async'; +import 'dart:math'; + +/// {@template retry_options} +/// Options to pass into [retryClient] to control the retry behavior. +/// {@endtemplate} +class RetryOptions { + /// {@macro retry_options} + RetryOptions({ + required this.maxRetries, + required this.addJitter, + this.retryIf, + this.delayDurations, + }); + + /// The maximum number of attempts to retry. + final int maxRetries; + + /// An evaluator function that can be used to decide if the function should + /// be retried based on the exception it throws. + /// + /// If you decide not to retry on a particular exception, [retryIf] can return + /// `false` and the retry won't happen. By default [retryIf] is `true` and + /// all exceptions are retried. + final FutureOr Function(Object e)? retryIf; + + /// The function will be retried based on an exponential backoff strategy + /// with a base delay of 1 second. + /// + /// But you can override this behavior by providing an optional list of + /// [delayDurations]`. Each entry in the list corresponds to a specific + /// retry attempt, and the corresponding delay from the list will be used + /// instead of the default exponential delay. + /// + /// For example, if you provide a list of `[2, 4, 8]`, the delays between the + /// first three retries will be 2, 4, and 8 seconds, respectively. + final List? delayDurations; + + /// Whether to add jitter to the exponential backoff. + /// + /// Jitter is a random value added to the delay to prevent multiple clients + /// from retrying at the same time. + final bool addJitter; +} + +/// A client that handles retry logic for a given function. +/// +/// This client takes [RetryOptions] and a function to execute. If the +/// function fails, it will be retried according to the specified options. +/// If it succeeds, the result of the function will be returned. 
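To make the retry semantics concrete, here is a minimal sketch of `withRetry` with the default exponential backoff (roughly 1s and then 2s between the three attempts) and with explicit `delayDurations`; the rate-limit predicate is purely illustrative:

```dart
import 'package:langchain_core/chat_models.dart';
import 'package:langchain_core/prompts.dart';

Future<void> main() async {
  const model = FakeEchoChatModel();

  // Default policy: up to 3 attempts, waiting ~1s and then ~2s between them.
  final withDefaults = model.withRetry();

  // Explicit delays (2s, then 4s) plus a predicate and jitter. Only two
  // delays are needed for three attempts, as no delay follows the last one.
  final withCustomDelays = model.withRetry(
    maxRetries: 3,
    delayDurations: const [Duration(seconds: 2), Duration(seconds: 4)],
    retryIf: (e) => e.toString().contains('rate limit'), // illustrative check
    addJitter: true,
  );

  final res = await withDefaults.invoke(PromptValue.string('ping'));
  print(res.output.content); // the fake echo model returns the prompt
  await withCustomDelays.invoke(PromptValue.string('pong'));
}
```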
+FutureOr retryClient({ + required RetryOptions options, + required FutureOr Function() fn, +}) async { + const defaultDelay = Duration(seconds: 1); + + for (int attempt = 0; attempt < options.maxRetries; attempt++) { + try { + return await fn(); + } catch (e) { + final isLastAttempt = attempt == options.maxRetries - 1; + final shouldRetry = await options.retryIf?.call(e) ?? true; + + if (isLastAttempt || !shouldRetry) { + rethrow; + } + + final duration = + options.delayDurations?[attempt] ?? defaultDelay * pow(2, attempt); + await _delay(duration, attempt, options.addJitter); + } + } + + // This line should never be reached + throw StateError('Exhausted all retry attempts'); +} + +Future _delay( + final Duration duration, + final int attempt, + final bool addJitter, +) async { + final Duration delay; + if (addJitter) { + final random = Random(); + final jitter = random.nextInt(100); + delay = Duration(milliseconds: duration.inMilliseconds + jitter); + } else { + delay = duration; + } + await Future.delayed(delay); +} diff --git a/packages/langchain_core/lib/src/utils/utils.dart b/packages/langchain_core/lib/src/utils/utils.dart index d439ed98..57924640 100644 --- a/packages/langchain_core/lib/src/utils/utils.dart +++ b/packages/langchain_core/lib/src/utils/utils.dart @@ -1,3 +1,4 @@ export 'chunk.dart'; export 'reduce.dart'; +export 'retry_client.dart'; export 'similarity.dart'; diff --git a/packages/langchain_core/lib/src/vector_stores/base.dart b/packages/langchain_core/lib/src/vector_stores/base.dart index 9ef54df3..3a5ecb51 100644 --- a/packages/langchain_core/lib/src/vector_stores/base.dart +++ b/packages/langchain_core/lib/src/vector_stores/base.dart @@ -45,8 +45,6 @@ abstract class VectorStore { /// Delete by vector ID. /// /// - [ids] is a list of ids to delete. - /// - /// Returns true if the delete was successful. Future delete({required final List ids}); /// Returns docs most similar to query using specified search type. diff --git a/packages/langchain_core/lib/stores.dart b/packages/langchain_core/lib/stores.dart index 2a234153..96eb406a 100644 --- a/packages/langchain_core/lib/stores.dart +++ b/packages/langchain_core/lib/stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to stores. -library stores; +library; export 'src/stores/stores.dart'; diff --git a/packages/langchain_core/lib/tools.dart b/packages/langchain_core/lib/tools.dart index 9d0b95aa..d829f7d5 100644 --- a/packages/langchain_core/lib/tools.dart +++ b/packages/langchain_core/lib/tools.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to tools. -library tools; +library; export 'src/tools/tools.dart'; diff --git a/packages/langchain_core/lib/utils.dart b/packages/langchain_core/lib/utils.dart index cdcc6670..7ceacd01 100644 --- a/packages/langchain_core/lib/utils.dart +++ b/packages/langchain_core/lib/utils.dart @@ -1,4 +1,4 @@ /// Contains core utilities. -library utils; +library; export 'src/utils/utils.dart'; diff --git a/packages/langchain_core/lib/vector_stores.dart b/packages/langchain_core/lib/vector_stores.dart index 35174345..129d296c 100644 --- a/packages/langchain_core/lib/vector_stores.dart +++ b/packages/langchain_core/lib/vector_stores.dart @@ -1,4 +1,4 @@ /// Contains core abstractions related to vector stores. 
-library vector_stores; +library; export 'src/vector_stores/vector_stores.dart'; diff --git a/packages/langchain_core/pubspec.yaml b/packages/langchain_core/pubspec.yaml index 65650ce8..69e8bac9 100644 --- a/packages/langchain_core/pubspec.yaml +++ b/packages/langchain_core/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_core description: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL). -version: 0.3.1 +version: 0.3.6 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -13,15 +13,15 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: async: ^2.11.0 - collection: '>=1.17.0 <1.19.0' - cross_file: ^0.3.4+1 + collection: ^1.18.0 + cross_file: ^0.3.4+2 crypto: ^3.0.3 meta: ^1.11.0 - rxdart: ^0.27.7 + rxdart: ">=0.27.7 <0.29.0" dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_core/test/runnables/binding_test.dart b/packages/langchain_core/test/runnables/binding_test.dart index af192ed2..e64f0042 100644 --- a/packages/langchain_core/test/runnables/binding_test.dart +++ b/packages/langchain_core/test/runnables/binding_test.dart @@ -4,6 +4,7 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; import 'package:langchain_core/runnables.dart'; +import 'package:langchain_core/tools.dart'; import 'package:test/test.dart'; void main() { @@ -20,6 +21,32 @@ void main() { expect(res, 'Hello '); }); + test('Chaining bind calls', () async { + final model = FakeChatModel( + responses: ['a', 'b'], + defaultOptions: const FakeChatModelOptions( + model: 'modelA', + metadata: {'foo': 'bar'}, + ), + ); + + final res1 = await model.invoke(PromptValue.string('1')); + expect(res1.metadata['model'], 'modelA'); + expect(res1.metadata['foo'], 'bar'); + + final chain2 = model.bind(const FakeChatModelOptions(model: 'modelB')); + final res2 = await chain2.invoke(PromptValue.string('2')); + expect(res2.metadata['model'], 'modelB'); + expect(res2.metadata['foo'], 'bar'); + + final chain3 = chain2.bind( + const FakeChatModelOptions(metadata: {'foo': 'baz'}), + ); + final res3 = await chain3.invoke(PromptValue.string('3')); + expect(res3.metadata['model'], 'modelB'); + expect(res3.metadata['foo'], 'baz'); + }); + test('Streaming RunnableBinding', () async { final prompt = PromptTemplate.fromTemplate('Hello {input}'); const model = _FakeOptionsChatModel(); @@ -97,4 +124,14 @@ class _FakeOptionsChatModelOptions extends ChatModelOptions { const _FakeOptionsChatModelOptions(this.stop); final String stop; + + @override + ChatModelOptions copyWith({ + final String? model, + final List? tools, + final ChatToolChoice? toolChoice, + final int? 
concurrencyLimit, + }) { + return _FakeOptionsChatModelOptions(stop); + } } diff --git a/packages/langchain_core/test/runnables/fallbacks_test.dart b/packages/langchain_core/test/runnables/fallbacks_test.dart new file mode 100644 index 00000000..7bc7a72d --- /dev/null +++ b/packages/langchain_core/test/runnables/fallbacks_test.dart @@ -0,0 +1,102 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/output_parsers.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:test/test.dart'; + +void main() { + group('RunnableFallback tests', () { + late FakeEchoChatModel model; + late FakeChatModel fallbackModel; + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + final input = PromptValue.string('why is the sky blue'); + + setUp(() { + model = const FakeEchoChatModel(); + fallbackModel = FakeChatModel(responses: ['fallback response']); + }); + + test('RunnableFallback should return main runnable output', () async { + final modelWithFallback = model.withFallbacks([fallbackModel]); + final res = await modelWithFallback.invoke(input); + expect(res.output.content, 'why is the sky blue'); + }); + + test('Should call fallback runnable if main runnable fails', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); + final res = await modelWithFallback.invoke(input); + expect(res.output.content, 'fallback response'); + }); + + test('Test batch response of main runnable in RunnableFallback', () async { + const model = FakeEchoChatModel(); + const fallbackModel = FakeEchoChatModel(); + final fallbackChain = promptTemplate.pipe(fallbackModel); + final chainWithFallbacks = + promptTemplate.pipe(model).withFallbacks([fallbackChain]); + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + expect(res[0].output.content, 'tell me a joke about bears'); + expect(res[1].output.content, 'tell me a joke about cats'); + }); + + test('Test fallbacks response in batch', () async { + final brokenModel = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final fallbackChain = promptTemplate.pipe(fallbackModel); + final chainWithFallbacks = + promptTemplate.pipe(brokenModel).withFallbacks([fallbackChain]); + final res = await chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + ], + ); + expect(res.first.output.content, 'fallback response'); + }); + + test('Should throw error if none of runnable returned output', () async { + final brokenModel1 = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final brokenModel2 = model.bind( + const FakeEchoChatModelOptions(throwRandomError: true), + ); + final fallbackChain = promptTemplate.pipe(brokenModel2); + final chainWithFallbacks = + promptTemplate.pipe(brokenModel1).withFallbacks([fallbackChain]); + expect( + () async => chainWithFallbacks.batch( + [ + {'topic': 'bears'}, + ], + ), + throwsException, + ); + }); + + test('Test stream response of main runnable in RunnableFallback', () async { + final modelWithFallback = model.withFallbacks([fallbackModel]); + final chain = modelWithFallback.pipe(const StringOutputParser()); + final res = await chain.stream(input).toList(); + expect(res.join('|'), 'w|h|y| |i|s| |t|h|e| |s|k|y| |b|l|u|e'); + }); + + test('Test fallbacks response in stream', () async { + final brokenModel = model.bind( + const 
FakeEchoChatModelOptions(throwRandomError: true), + ); + final modelWithFallback = brokenModel.withFallbacks([fallbackModel]); + final chain = modelWithFallback.pipe(const StringOutputParser()); + final res = await chain.stream(input).toList(); + expect(res.join('|'), endsWith('f|a|l|l|b|a|c|k| |r|e|s|p|o|n|s|e')); + }); + }); +} diff --git a/packages/langchain_core/test/runnables/map_test.dart b/packages/langchain_core/test/runnables/map_test.dart index 98a4a3ff..e65dc73a 100644 --- a/packages/langchain_core/test/runnables/map_test.dart +++ b/packages/langchain_core/test/runnables/map_test.dart @@ -1,5 +1,4 @@ // ignore_for_file: unused_element -import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/output_parsers.dart'; @@ -43,11 +42,11 @@ void main() { final left = streamList .map((final it) => it['left']) // - .whereNotNull() + .nonNulls .join(); final right = streamList .map((final it) => it['right']) // - .whereNotNull() + .nonNulls .join(); expect(left, 'Hello world!'); diff --git a/packages/langchain_core/test/runnables/retry_test.dart b/packages/langchain_core/test/runnables/retry_test.dart new file mode 100644 index 00000000..f1e8f625 --- /dev/null +++ b/packages/langchain_core/test/runnables/retry_test.dart @@ -0,0 +1,87 @@ +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/runnables.dart'; +import 'package:test/test.dart'; + +void main() { + group('Runnable Retry Test', () { + late FakeEchoChatModel model; + final input = PromptValue.string('why is the sky blue'); + final promptTemplate = + ChatPromptTemplate.fromTemplate('tell me a joke about {topic}'); + + setUp(() { + model = const FakeEchoChatModel(); + }); + + test('Runnable retry should return output for invoke', () async { + final modelWithRetry = model.withRetry(maxRetries: 2); + final res = await modelWithRetry.invoke(input); + expect(res.output.content, 'why is the sky blue'); + }); + + test('Runnable retry should return output for batch', () async { + final chain = promptTemplate.pipe(model); + final chainWithRetry = chain.withRetry(); + final res = await chainWithRetry.batch( + [ + {'topic': 'bears'}, + {'topic': 'cats'}, + ], + ); + expect(res[0].output.content, 'tell me a joke about bears'); + expect(res[1].output.content, 'tell me a joke about cats'); + }); + + test('Should retry based RetryOptions, maxRetries = 2', () async { + final modelWithRetry = model.withRetry(maxRetries: 2); + expect( + () async => modelWithRetry.invoke( + input, + options: const FakeEchoChatModelOptions(throwRandomError: true), + ), + throwsException, + ); + }); + + test('Should return the output after successful retry', () async { + int count = 0; + final modelWithRetry = model.pipe( + Runnable.fromFunction( + invoke: (input, opt) { + if (count++ < 1) { + throw Exception('Random error'); + } + return input; + }, + ), + ).withRetry(maxRetries: 2); + final res = await modelWithRetry.invoke(input); + expect(res.outputAsString, input.toString()); + expect(count, 2); + }); + + test('Should not retry if retryIf returned false', () async { + late String error; + final modelWithRetry = model.withRetry( + maxRetries: 3, + retryIf: (e) { + if (e.toString() == 'Exception: Random error') { + return false; + } else { + return true; + } + }, + ); + try { + await modelWithRetry.invoke( + input, + options: const FakeEchoChatModelOptions(throwRandomError: true), + 
); + } catch (e) { + error = e.toString(); + } + expect(error, 'Exception: Random error'); + }); + }); +} diff --git a/packages/langchain_firebase/CHANGELOG.md b/packages/langchain_firebase/CHANGELOG.md index 862156b6..ab291e7e 100644 --- a/packages/langchain_firebase/CHANGELOG.md +++ b/packages/langchain_firebase/CHANGELOG.md @@ -1,3 +1,44 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.1+2 + + - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +## 0.2.1+1 + + - Update a dependency to the latest release. + +## 0.2.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.2.0 + +> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. + + - **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b)) + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34)) + - **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16)) + - **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44)) + - **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7)) + +## 0.1.0+3 + + - Update a dependency to the latest release. + +## 0.1.0+2 + + - Update a dependency to the latest release. + +## 0.1.0+1 + + - **DOCS**: Fix lint issues in langchain_firebase example. 
([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c)) + ## 0.1.0 - **FEAT**: Add support for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f)) diff --git a/packages/langchain_firebase/example/lib/main.dart b/packages/langchain_firebase/example/lib/main.dart index 7cbb5e8e..f9d5db92 100644 --- a/packages/langchain_firebase/example/lib/main.dart +++ b/packages/langchain_firebase/example/lib/main.dart @@ -155,7 +155,7 @@ class _ChatWidgetState extends State { _model = ChatFirebaseVertexAI( defaultOptions: ChatFirebaseVertexAIOptions( - model: 'gemini-1.5-pro-preview-0514', + model: 'gemini-1.5-pro', tools: [exchangeRateTool], ), // location: 'us-central1', @@ -580,7 +580,7 @@ class MessageWidget extends StatelessWidget { decoration: BoxDecoration( color: isFromUser ? Theme.of(context).colorScheme.primaryContainer - : Theme.of(context).colorScheme.surfaceVariant, + : Theme.of(context).colorScheme.surfaceContainerHighest, borderRadius: BorderRadius.circular(18), ), padding: const EdgeInsets.symmetric( diff --git a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift index 7bade716..c6c180db 100644 --- a/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -6,9 +6,11 @@ import FlutterMacOS import Foundation import firebase_app_check +import firebase_auth import firebase_core func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAppCheckPlugin")) + FLTFirebaseAuthPlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseAuthPlugin")) FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: "FLTFirebaseCorePlugin")) } diff --git a/packages/langchain_firebase/example/pubspec.lock b/packages/langchain_firebase/example/pubspec.lock index c3e14ba2..02cc232c 100644 --- a/packages/langchain_firebase/example/pubspec.lock +++ b/packages/langchain_firebase/example/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" + sha256: b1595874fbc8f7a50da90f5d8f327bb0bfd6a95dc906c390efe991540c3b54aa url: "https://pub.dev" source: hosted - version: "1.3.33" + version: "1.3.40" args: dependency: transitive description: @@ -61,10 +61,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -93,58 +93,82 @@ packages: dependency: transitive description: name: firebase_app_check - sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b + sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" url: "https://pub.dev" source: hosted - version: "0.2.2+5" + version: "0.3.0+4" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: 
c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 + sha256: edefbd312d2f4c52ab6a62d4efca512012bcc580f152c856a5730bfabcf8a924 url: "https://pub.dev" source: hosted - version: "0.1.0+27" + version: "0.1.0+34" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" + sha256: "2c2377ecf922514c540c2d4a9c06e46830a0706fdfc3d59b7ade9b75843b81c5" url: "https://pub.dev" source: hosted - version: "0.1.2+5" + version: "0.1.2+12" + firebase_auth: + dependency: transitive + description: + name: firebase_auth + sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" + url: "https://pub.dev" + source: hosted + version: "5.1.4" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: "0408e2ed74b1afa0490a93aa041fe90d7573af7ffc59a641edc6c5b5c1b8d2a4" + url: "https://pub.dev" + source: hosted + version: "7.4.3" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: "7e0c6d0fa8c5c1b2ae126a78f2d1a206a77a913f78d20f155487bf746162dccc" + url: "https://pub.dev" + source: hosted + version: "5.12.5" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" + sha256: "3187f4f8e49968573fd7403011dca67ba95aae419bc0d8131500fae160d94f92" url: "https://pub.dev" source: hosted - version: "2.31.0" + version: "3.3.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 + sha256: "3c3a1e92d6f4916c32deea79c4a7587aa0e9dbbe5889c7a16afcf005a485ee02" url: "https://pub.dev" source: hosted - version: "5.0.0" + version: "5.2.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" + sha256: e8d1e22de72cb21cdcfc5eed7acddab3e99cd83f3b317f54f7a96c32f25fd11e url: "https://pub.dev" source: hosted - version: "2.17.0" + version: "2.17.4" firebase_vertexai: dependency: transitive description: name: firebase_vertexai - sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" + sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 url: "https://pub.dev" source: hosted - version: "0.1.0" + version: "0.2.2+4" fixnum: dependency: transitive description: @@ -170,10 +194,10 @@ packages: dependency: "direct main" description: name: flutter_markdown - sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + sha256: a23c41ee57573e62fc2190a1f36a0480c4d90bde3a8a8d7126e5d5992fb53fb7 url: "https://pub.dev" source: hosted - version: "0.6.23" + version: "0.7.3+1" flutter_test: dependency: "direct dev" description: flutter @@ -188,18 +212,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: 
dependency: transitive description: @@ -214,45 +238,45 @@ packages: path: "../../langchain" relative: true source: path - version: "0.7.1" + version: "0.7.6" langchain_core: dependency: "direct overridden" description: path: "../../langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.6" langchain_firebase: dependency: "direct main" description: path: ".." relative: true source: path - version: "0.1.0" + version: "0.2.1+2" leak_tracker: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" lints: dependency: transitive description: @@ -289,10 +313,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: transitive description: @@ -313,10 +337,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter @@ -374,10 +398,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -390,10 +414,10 @@ packages: dependency: transitive description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -406,10 +430,10 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" web: dependency: transitive description: @@ -419,5 +443,5 @@ packages: source: hosted version: "0.5.1" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/example/pubspec.yaml b/packages/langchain_firebase/example/pubspec.yaml index 76900f0d..2c34d324 100644 --- 
a/packages/langchain_firebase/example/pubspec.yaml +++ b/packages/langchain_firebase/example/pubspec.yaml @@ -4,17 +4,17 @@ version: 1.0.0+1 publish_to: 'none' environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: cupertino_icons: ^1.0.6 - firebase_core: ^2.31.0 + firebase_core: ^3.3.0 flutter: sdk: flutter - flutter_markdown: ^0.6.22 - langchain: 0.7.1 - langchain_firebase: 0.1.0 + flutter_markdown: ^0.7.3 + langchain: 0.7.6 + langchain_firebase: 0.2.1+2 dev_dependencies: flutter_lints: ^3.0.0 diff --git a/packages/langchain_firebase/example/pubspec_overrides.yaml b/packages/langchain_firebase/example/pubspec_overrides.yaml index 35cb2da2..fb671352 100644 --- a/packages/langchain_firebase/example/pubspec_overrides.yaml +++ b/packages/langchain_firebase/example/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_core,langchain_firebase,langchain +# melos_managed_dependency_overrides: langchain,langchain_core,langchain_firebase dependency_overrides: langchain: path: ../../langchain diff --git a/packages/langchain_firebase/example/web/flutter_bootstrap.js b/packages/langchain_firebase/example/web/flutter_bootstrap.js new file mode 100644 index 00000000..8ce49d8a --- /dev/null +++ b/packages/langchain_firebase/example/web/flutter_bootstrap.js @@ -0,0 +1,12 @@ +{{flutter_js}} +{{flutter_build_config}} + +_flutter.loader.load({ + serviceWorkerSettings: { + serviceWorkerVersion: {{flutter_service_worker_version}}, + }, + onEntrypointLoaded: async function(engineInitializer) { + const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true}); + await appRunner.runApp(); + }, +}); diff --git a/packages/langchain_firebase/example/web/index.html b/packages/langchain_firebase/example/web/index.html index 27ef6265..cce674b5 100644 --- a/packages/langchain_firebase/example/web/index.html +++ b/packages/langchain_firebase/example/web/index.html @@ -1,61 +1,25 @@ - - + - - - - + + + + - - - example - + - - - + VertexAI for Firebase in LangChain.dart + - + diff --git a/packages/langchain_firebase/lib/langchain_firebase.dart b/packages/langchain_firebase/lib/langchain_firebase.dart index 243b71ac..45448a85 100644 --- a/packages/langchain_firebase/lib/langchain_firebase.dart +++ b/packages/langchain_firebase/lib/langchain_firebase.dart @@ -1,4 +1,5 @@ -/// LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). +/// LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). 
library; export 'src/chat_models/chat_models.dart'; +export 'src/stores/firestore.dart'; diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart index f8c3870d..83ac8d8c 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart @@ -1,10 +1,10 @@ import 'package:collection/collection.dart'; import 'package:firebase_app_check/firebase_app_check.dart'; +import 'package:firebase_auth/firebase_auth.dart'; import 'package:firebase_core/firebase_core.dart'; import 'package:firebase_vertexai/firebase_vertexai.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; -import 'package:langchain_core/tools.dart'; import 'package:uuid/uuid.dart'; import 'mappers.dart'; @@ -36,25 +36,25 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.0-pro` -/// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 -/// - `gemini-1.0-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 -/// - `gemini-1.5-pro-preview-0514`: +/// - `gemini-1.5-flash`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-flash-preview-0514`: +/// - `gemini-1.5-pro`: /// * text / image / audio -> text model -/// * Max input token: 1048576 +/// * Max input token: 2097152 +/// * Max output tokens: 8192 +/// - `gemini-1.0-pro-vision`: +/// * text / image -> text model +/// * Max input token: 16384 +/// * Max output tokens: 2048 +/// - `gemini-1.0-pro` +/// * text -> text model +/// * Max input token: 32760 /// * Max output tokens: 8192 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://firebase.google.com/docs/vertex-ai/gemini-models) +/// Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) /// for the updated list. /// /// ### Call options @@ -111,7 +111,7 @@ import 'types.dart'; /// /// [ChatFirebaseVertexAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. /// /// Example: @@ -132,7 +132,7 @@ import 'types.dart'; /// ); /// final chatModel = ChatFirebaseVertexAI( /// defaultOptions: ChatFirebaseVertexAIOptions( -/// model: 'gemini-1.5-pro-preview-0514', +/// model: 'gemini-1.5-pro', /// temperature: 0, /// tools: [tool], /// ), @@ -154,10 +154,11 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// - [ChatFirebaseVertexAI.location] ChatFirebaseVertexAI({ super.defaultOptions = const ChatFirebaseVertexAIOptions( - model: 'gemini-1.0-pro', + model: defaultModel, ), this.app, this.appCheck, + this.auth, this.options, this.location, }) : _currentModel = defaultOptions.model ?? '' { @@ -172,6 +173,9 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// The optional [FirebaseAppCheck] to use to protect the project from abuse. final FirebaseAppCheck? appCheck; + /// The optional [FirebaseAuth] to use for authentication. + final FirebaseAuth? auth; + /// Configuration parameters for sending requests to Firebase. 
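As a usage note for the new `responseMimeType` option wired through below, a minimal sketch (assuming Firebase has already been initialised via `Firebase.initializeApp()`; the prompt is illustrative):

```dart
import 'package:langchain_core/prompts.dart';
import 'package:langchain_firebase/langchain_firebase.dart';

Future<void> main() async {
  // Assumes Firebase.initializeApp() has already been called.
  final chatModel = ChatFirebaseVertexAI(
    defaultOptions: ChatFirebaseVertexAIOptions(
      model: 'gemini-1.5-flash',
      temperature: 0,
      // New in this release: request a JSON response instead of plain text.
      responseMimeType: 'application/json',
    ),
  );

  final res = await chatModel.invoke(
    PromptValue.string('List three prime numbers as a JSON array.'),
  );
  print(res.output.content);
}
```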
final RequestOptions? options; @@ -184,20 +188,17 @@ class ChatFirebaseVertexAI extends BaseChatModel { /// A UUID generator. late final Uuid _uuid = const Uuid(); - @override - String get modelType => 'chat-firebase-vertex-ai'; - /// The current model set in [_firebaseClient]; String _currentModel; /// The current system instruction set in [_firebaseClient]; String? _currentSystemInstruction; - /// The current tools set in [_firebaseClient]; - List? _currentTools; + @override + String get modelType => 'chat-firebase-vertex-ai'; - /// The current tool choice set in [_firebaseClient]; - ChatToolChoice? _currentToolChoice; + /// The default model to use unless another is specified. + static const defaultModel = 'gemini-1.5-flash'; @override Future invoke( @@ -205,12 +206,14 @@ class ChatFirebaseVertexAI extends BaseChatModel { final ChatFirebaseVertexAIOptions? options, }) async { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig) = + final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); final completion = await _firebaseClient.generateContent( prompt, safetySettings: safetySettings, generationConfig: generationConfig, + tools: tools, + toolConfig: toolConfig, ); return completion.toChatResult(id, model); } @@ -221,13 +224,15 @@ class ChatFirebaseVertexAI extends BaseChatModel { final ChatFirebaseVertexAIOptions? options, }) { final id = _uuid.v4(); - final (model, prompt, safetySettings, generationConfig) = + final (model, prompt, safetySettings, generationConfig, tools, toolConfig) = _generateCompletionRequest(input.toChatMessages(), options: options); return _firebaseClient .generateContentStream( prompt, safetySettings: safetySettings, generationConfig: generationConfig, + tools: tools, + toolConfig: toolConfig, ) .map((final completion) => completion.toChatResult(id, model)); } @@ -238,6 +243,8 @@ class ChatFirebaseVertexAI extends BaseChatModel { Iterable prompt, List? safetySettings, GenerationConfig? generationConfig, + List? tools, + ToolConfig? toolConfig, ) _generateCompletionRequest( final List messages, { final ChatFirebaseVertexAIOptions? options, @@ -259,7 +266,15 @@ class ChatFirebaseVertexAI extends BaseChatModel { temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, + responseMimeType: + options?.responseMimeType ?? defaultOptions.responseMimeType, + // responseSchema not supported yet + // responseSchema: + // (options?.responseSchema ?? defaultOptions.responseSchema) + // ?.toSchema(), ), + (options?.tools ?? defaultOptions.tools)?.toToolList(), + (options?.toolChoice ?? defaultOptions.toolChoice)?.toToolConfig(), ); } @@ -288,8 +303,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { GenerativeModel _createFirebaseClient( final String model, { final String? systemInstruction, - final List? tools, - final ChatToolChoice? toolChoice, }) { return FirebaseVertexAI.instanceFor( app: app, @@ -300,8 +313,6 @@ class ChatFirebaseVertexAI extends BaseChatModel { model: model, systemInstruction: systemInstruction != null ? Content.system(systemInstruction) : null, - tools: tools?.toToolList(), - toolConfig: toolChoice?.toToolConfig(), ); } @@ -309,14 +320,10 @@ class ChatFirebaseVertexAI extends BaseChatModel { void _recreateFirebaseClient( final String model, final String? systemInstruction, - final List? tools, - final ChatToolChoice? 
toolChoice, ) { _firebaseClient = _createFirebaseClient( model, systemInstruction: systemInstruction, - tools: tools, - toolChoice: toolChoice, ); } @@ -325,16 +332,12 @@ class ChatFirebaseVertexAI extends BaseChatModel { final List messages, final ChatFirebaseVertexAIOptions? options, ) { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? defaultModel; final systemInstruction = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString : null; - final tools = options?.tools ?? defaultOptions.tools; - final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice; - bool recreate = false; if (model != _currentModel) { _currentModel = model; @@ -344,17 +347,9 @@ class ChatFirebaseVertexAI extends BaseChatModel { _currentSystemInstruction = systemInstruction; recreate = true; } - if (!const ListEquality().equals(tools, _currentTools)) { - _currentTools = tools; - recreate = true; - } - if (toolChoice != _currentToolChoice) { - _currentToolChoice = toolChoice; - recreate = true; - } if (recreate) { - _recreateFirebaseClient(model, systemInstruction, tools, toolChoice); + _recreateFirebaseClient(model, systemInstruction); } } } diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart index 05840e8f..9c55d409 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart @@ -134,11 +134,11 @@ extension GenerateContentResponseMapper on f.GenerateContentResponse { .toList(growable: false), 'finish_message': candidate.finishMessage, }, - usage: const LanguageModelUsage( - // promptTokens: usageMetadata?.promptTokenCount, // not yet supported - // responseTokens: usageMetadata?.candidatesTokenCount, - // totalTokens: usageMetadata?.totalTokenCount, - ), + usage: LanguageModelUsage( + promptTokens: usageMetadata?.promptTokenCount, + responseTokens: usageMetadata?.candidatesTokenCount, + totalTokens: usageMetadata?.totalTokenCount, + ), ); } @@ -197,14 +197,17 @@ extension ChatToolListMapper on List { (tool) => f.FunctionDeclaration( tool.name, tool.description, - _mapJsonSchemaToSchema(tool.inputJsonSchema), + tool.inputJsonSchema.toSchema(), ), ).toList(growable: false), ), ]; } +} - f.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { +extension SchemaMapper on Map { + f.Schema toSchema() { + final jsonSchema = this; final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -217,45 +220,38 @@ extension ChatToolListMapper on List { switch (type) { case 'string': if (enumValues != null) { - return f.Schema( - f.SchemaType.string, + return f.Schema.enumString( enumValues: enumValues, description: description, nullable: nullable, - format: 'enum', ); } else { - return f.Schema( - f.SchemaType.string, + return f.Schema.string( description: description, nullable: nullable, ); } case 'number': - return f.Schema( - f.SchemaType.number, + return f.Schema.number( description: description, nullable: nullable, format: format, ); case 'integer': - return f.Schema( - f.SchemaType.integer, + return f.Schema.integer( description: description, nullable: nullable, format: format, ); case 'boolean': - return f.Schema( - f.SchemaType.boolean, + return f.Schema.boolean( description: description, nullable: 
nullable, ); case 'array': if (items != null) { - final itemsSchema = _mapJsonSchemaToSchema(items); - return f.Schema( - f.SchemaType.array, + final itemsSchema = items.toSchema(); + return f.Schema.array( description: description, nullable: nullable, items: itemsSchema, @@ -265,10 +261,12 @@ extension ChatToolListMapper on List { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), + (key, value) => MapEntry( + key, + (value as Map).toSchema(), + ), ); - return f.Schema( - f.SchemaType.object, + return f.Schema.object( properties: propertiesSchema, requiredProperties: requiredProperties, description: description, @@ -295,6 +293,11 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: f.FunctionCallingMode.auto, ), ), + ChatToolChoiceRequired() => f.ToolConfig( + functionCallingConfig: f.FunctionCallingConfig( + mode: f.FunctionCallingMode.any, + ), + ), final ChatToolChoiceForced t => f.ToolConfig( functionCallingConfig: f.FunctionCallingConfig( mode: f.FunctionCallingMode.any, diff --git a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart index d41e4032..fc28f66e 100644 --- a/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart @@ -1,30 +1,32 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_firebase_vertex_ai_options} /// Options to pass into the Vertex AI for Firebase model. +/// +/// You can find a list of available models here: +/// https://firebase.google.com/docs/vertex-ai/gemini-models /// {@endtemplate} +@immutable class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// {@macro chat_firebase_vertex_ai_options} const ChatFirebaseVertexAIOptions({ - this.model = 'gemini-1.0-pro', + super.model, this.topP, this.topK, this.candidateCount, this.maxOutputTokens, this.temperature, this.stopSequences, + this.responseMimeType, this.safetySettings, super.tools, super.toolChoice, super.concurrencyLimit, }); - /// The LLM to use. - /// - /// You can find a list of available models here: - /// https://firebase.google.com/docs/vertex-ai/gemini-models - final String? model; - /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -69,6 +71,13 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; + /// Output response mimetype of the generated candidate text. + /// + /// Supported mimetype: + /// - `text/plain`: (default) Text output. + /// - `application/json`: JSON response in the candidates. + final String? responseMimeType; + /// A list of unique [ChatFirebaseVertexAISafetySetting] instances for blocking /// unsafe content. /// @@ -81,8 +90,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// the default safety setting for that category. final List? safetySettings; - /// Creates a copy of this [ChatFirebaseVertexAIOptions] object with the given fields - /// replaced with the new values. + @override ChatFirebaseVertexAIOptions copyWith({ final String? model, final double? 
topP, @@ -91,7 +99,11 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { final int? maxOutputTokens, final double? temperature, final List? stopSequences, + final String? responseMimeType, final List? safetySettings, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatFirebaseVertexAIOptions( model: model ?? this.model, @@ -101,9 +113,68 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens, temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, + responseMimeType: responseMimeType ?? this.responseMimeType, safetySettings: safetySettings ?? this.safetySettings, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + ChatFirebaseVertexAIOptions merge( + covariant final ChatFirebaseVertexAIOptions? other, + ) { + return copyWith( + model: other?.model, + topP: other?.topP, + topK: other?.topK, + candidateCount: other?.candidateCount, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + stopSequences: other?.stopSequences, + responseMimeType: other?.responseMimeType, + safetySettings: other?.safetySettings, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final ChatFirebaseVertexAIOptions other) { + return model == other.model && + topP == other.topP && + topK == other.topK && + candidateCount == other.candidateCount && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + responseMimeType == other.responseMimeType && + const ListEquality() + .equals(safetySettings, other.safetySettings) && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + candidateCount.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + const ListEquality().hash(stopSequences) ^ + responseMimeType.hashCode ^ + const ListEquality() + .hash(safetySettings) ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_google_generative_ai_safety_setting} @@ -111,6 +182,7 @@ class ChatFirebaseVertexAIOptions extends ChatModelOptions { /// Passing a safety setting for a category changes the allowed probability that /// content is blocked. /// {@endtemplate} +@immutable class ChatFirebaseVertexAISafetySetting { /// {@macro chat_google_generative_ai_safety_setting} const ChatFirebaseVertexAISafetySetting({ @@ -123,6 +195,28 @@ class ChatFirebaseVertexAISafetySetting { /// Controls the probability threshold at which harm is blocked. final ChatFirebaseVertexAISafetySettingThreshold threshold; + + /// Creates a copy of this [ChatFirebaseVertexAISafetySetting] object with + /// the given fields replaced with the new values. + ChatFirebaseVertexAISafetySetting copyWith({ + final ChatFirebaseVertexAISafetySettingCategory? category, + final ChatFirebaseVertexAISafetySettingThreshold? threshold, + }) { + return ChatFirebaseVertexAISafetySetting( + category: category ?? this.category, + threshold: threshold ?? 
this.threshold, + ); + } + + @override + bool operator ==(covariant final ChatFirebaseVertexAISafetySetting other) { + return category == other.category && threshold == other.threshold; + } + + @override + int get hashCode { + return category.hashCode ^ threshold.hashCode; + } } /// Safety setting categories.
diff --git a/packages/langchain_firebase/lib/src/stores/firestore.dart b/packages/langchain_firebase/lib/src/stores/firestore.dart new file mode 100644 index 00000000..e2db4685 --- /dev/null +++ b/packages/langchain_firebase/lib/src/stores/firestore.dart @@ -0,0 +1,171 @@ +import 'package:cloud_firestore/cloud_firestore.dart'; +import 'package:firebase_core/firebase_core.dart'; +import 'package:langchain_core/chat_history.dart'; +import 'package:langchain_core/chat_models.dart'; +
+/// Can be used to store ChatMessages in Firestore. +final class FirestoreChatMessageHistory extends BaseChatMessageHistory { + /// Constructor. + FirestoreChatMessageHistory({ + required this.collections, + required this.options, + required this.userId, + }) { + collectionReference = FirebaseFirestore.instance.collection(collections); + } +
+ /// Path of the Firestore collection where the messages are stored. + String collections; + + /// Firebase options. + FirebaseOptions options; + + /// Used to identify the sender of each message. + String userId; + + /// Reference to the Firestore collection. + late CollectionReference<Map<String, dynamic>> collectionReference; +
+ @override + Future<void> addChatMessage(final ChatMessage message) async { + final FirestoreChatMessageField messageField = + FirestoreChatMessageField(message: message); + + await collectionReference.doc().set(messageField.toJson(userId)); + } +
+ @override + Future<void> clear() async { + final snapshot = await collectionReference.get(); + + // Delete all docs in Firestore + for (final doc in snapshot.docs) { + await doc.reference.delete(); + } + } +
+ @override + Future<List<ChatMessage>> getChatMessages() async { + // Get chat messages in ascending order so the newest message is the last in the list + final snapshot = await collectionReference + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: false) + .get(); + + // Take each doc and add it to the conversation list. + final List<ChatMessage> conversation = snapshot.docs.map((final doc) { + return FirestoreChatMessageField.fromJson(doc.data()).message; + }).toList(); + + return conversation; + } +
+ /// Removes and returns the first (oldest) element of the history. + /// + /// The history must not be empty when this method is called. + @override + Future<ChatMessage> removeFirst() async { + final snapshot = await collectionReference + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: false) + .limit(1) + .get(); + + // Get the oldest document + final oldest = snapshot.docs.first; + + // Delete the doc in Firestore + await oldest.reference.delete(); + + // Create a FirestoreChatMessageField and return the ChatMessage + return FirestoreChatMessageField.fromJson(oldest.data()).message; + } +
+ /// Removes and returns the last (newest) element of the history. + /// + /// The history must not be empty when this method is called.
+ @override + Future removeLast() async { + final snapshot = await collectionReference + .where('createdBy', isEqualTo: userId) + .orderBy('createdAt', descending: true) + .limit(1) + .get(); + + //get newest document + final newest = snapshot.docs.first; + + //Delete doc in firestore + await newest.reference.delete(); + + //Create FirestoreChatMessageField and return ChatMessage + return FirestoreChatMessageField.fromJson(newest.data()).message; + } +} + +///This class makes sure that every chat message on firestore has a timestamp +///This will enable fetching document with orderBy(created). +class FirestoreChatMessageField { + ///Will contain the chat message + final ChatMessage message; + + ///Timestamp to keep messages in order. + Timestamp createdAt = Timestamp.now(); + + ///Constructor + FirestoreChatMessageField({ + required this.message, + final Timestamp? createdAt, + }) { + if (createdAt == null) { + this.createdAt = Timestamp.now(); + } else { + this.createdAt = createdAt; + } + } + + ///FromJson will convert a json map to a FirestoreChatMessageField + factory FirestoreChatMessageField.fromJson(final Map json) { + // ignore: avoid_dynamic_calls + switch (json['message']['type']) { + case SystemChatMessage.defaultPrefix: + return FirestoreChatMessageField( + message: SystemChatMessage.fromJson(json['message']), + createdAt: json['createdAt'], + ); + case HumanChatMessage.defaultPrefix: + return FirestoreChatMessageField( + message: HumanChatMessage.fromJson(json['message']), + createdAt: json['createdAt'], + ); + + case AIChatMessage.defaultPrefix: + return FirestoreChatMessageField( + message: AIChatMessage.fromJson(json['message']), + createdAt: json['createdAt'], + ); + + case ToolChatMessage.defaultPrefix: + return FirestoreChatMessageField( + message: ToolChatMessage.fromJson(json['message']), + createdAt: json['createdAt'], + ); + + case 'Custom': + return FirestoreChatMessageField( + message: CustomChatMessage.fromJson(json['message']), + createdAt: json['createdAt'], + ); + default: + // ignore: avoid_dynamic_calls + throw FormatException("INVALID JSON FILE = ${json['message']['type']}"); + } + } + + /// Will be used to transform the class into a json object + Map toJson(String createdBy) => { + 'message': message.toJson(), + 'createdAt': createdAt, + 'createdBy': createdBy, + }; +} diff --git a/packages/langchain_firebase/pubspec.lock b/packages/langchain_firebase/pubspec.lock index 52f48436..deafd770 100644 --- a/packages/langchain_firebase/pubspec.lock +++ b/packages/langchain_firebase/pubspec.lock @@ -5,10 +5,10 @@ packages: dependency: transitive description: name: _flutterfire_internals - sha256: "2350805d7afefb0efe7acd325cb19d3ae8ba4039b906eade3807ffb69938a01f" + sha256: "5534e701a2c505fed1f0799e652dd6ae23bd4d2c4cf797220e5ced5764a7c1c2" url: "https://pub.dev" source: hosted - version: "1.3.33" + version: "1.3.44" async: dependency: transitive description: @@ -41,6 +41,30 @@ packages: url: "https://pub.dev" source: hosted version: "1.1.1" + cloud_firestore: + dependency: "direct main" + description: + name: cloud_firestore + sha256: a08d0f4aae40e63e7a57102de890d5d3c93d719ce38985b2a36c2672283af7d2 + url: "https://pub.dev" + source: hosted + version: "5.4.3" + cloud_firestore_platform_interface: + dependency: transitive + description: + name: cloud_firestore_platform_interface + sha256: "884fa34c6be2d9c7c1f4af86f90f36c0a3b3afef585a12b350a5d15368e7ec7a" + url: "https://pub.dev" + source: hosted + version: "6.4.3" + cloud_firestore_web: + dependency: transitive + 
description: + name: cloud_firestore_web + sha256: "6e621bbcc999f32db0bc6bfcb18d9991617ec20f8d6bf51b6a1571f5c324fafd" + url: "https://pub.dev" + source: hosted + version: "4.3.2" collection: dependency: "direct main" description: @@ -53,10 +77,10 @@ packages: dependency: transitive description: name: cross_file - sha256: "55d7b444feb71301ef6b8838dbc1ae02e63dd48c8773f3810ff53bb1e2945b32" + sha256: "7caf6a750a0c04effbb52a676dce9a4a592e10ad35c34d6d2d0e4811160d5670" url: "https://pub.dev" source: hosted - version: "0.3.4+1" + version: "0.3.4+2" crypto: dependency: transitive description: @@ -77,58 +101,82 @@ packages: dependency: "direct main" description: name: firebase_app_check - sha256: f66b67027de548b6f8b61c7aa752a24804104e7f463723d202ae9c6d9520fa6b + sha256: "8314938830d6b47217e369664567f6d8a1e77603448e1dbdaf4f7d8c2111ff5c" url: "https://pub.dev" source: hosted - version: "0.2.2+5" + version: "0.3.0+4" firebase_app_check_platform_interface: dependency: transitive description: name: firebase_app_check_platform_interface - sha256: c06fcb2381ff2566f4ebe1a1df2e1798de3d0fad531f673e8539368f33cf6b34 + sha256: "8dbb826d99c67512212331331461ee142e46645740f1c1209706ca2f72958e57" url: "https://pub.dev" source: hosted - version: "0.1.0+27" + version: "0.1.0+38" firebase_app_check_web: dependency: transitive description: name: firebase_app_check_web - sha256: "88c8d792d429a43767461312f29baa5d3b76cd0453cf48dd008f8f94393221c1" + sha256: "0d889be9adf525fea6791ab8b137ee3c152c28d126115408768bd83ab5e7e46d" url: "https://pub.dev" source: hosted - version: "0.1.2+5" + version: "0.1.3+2" + firebase_auth: + dependency: "direct main" + description: + name: firebase_auth + sha256: "2457ac6cbc152fa464aad3fb35f98039b0c4ab8e9bedf476672508b291bdbc3a" + url: "https://pub.dev" + source: hosted + version: "5.1.4" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: "78966c2ef774f5bf2a8381a307222867e9ece3509110500f7a138c115926aa65" + url: "https://pub.dev" + source: hosted + version: "7.4.7" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: "77ad3b252badedd3f08dfa21a4c7fe244be96c6da3a4067f253b13ea5d32424c" + url: "https://pub.dev" + source: hosted + version: "5.13.2" firebase_core: dependency: "direct main" description: name: firebase_core - sha256: "372d94ced114b9c40cb85e18c50ac94a7e998c8eec630c50d7aec047847d27bf" + sha256: "51dfe2fbf3a984787a2e7b8592f2f05c986bfedd6fdacea3f9e0a7beb334de96" url: "https://pub.dev" source: hosted - version: "2.31.0" + version: "3.6.0" firebase_core_platform_interface: dependency: transitive description: name: firebase_core_platform_interface - sha256: c437ae5d17e6b5cc7981cf6fd458a5db4d12979905f9aafd1fea930428a9fe63 + sha256: e30da58198a6d4b49d5bce4e852f985c32cb10db329ebef9473db2b9f09ce810 url: "https://pub.dev" source: hosted - version: "5.0.0" + version: "5.3.0" firebase_core_web: dependency: transitive description: name: firebase_core_web - sha256: "43d9e951ac52b87ae9cc38ecdcca1e8fa7b52a1dd26a96085ba41ce5108db8e9" + sha256: f967a7138f5d2ffb1ce15950e2a382924239eaa521150a8f144af34e68b3b3e5 url: "https://pub.dev" source: hosted - version: "2.17.0" + version: "2.18.1" firebase_vertexai: dependency: "direct main" description: name: firebase_vertexai - sha256: "6e61f6717bee3ab563e8e506e0fed98761f98c181626c62d924d06598786e95e" + sha256: ad34f7a87d870949e92851f4c73b7e15f808fd4717ed899fa7b4813fffe74831 url: "https://pub.dev" source: hosted - version: "0.1.0" + version: 
"0.2.2+4" fixnum: dependency: transitive description: @@ -156,18 +204,18 @@ packages: dependency: transitive description: name: google_generative_ai - sha256: bb7d3480b05afb3b1f2459b52893cb22f69ded4e2fb853e212437123c457f1be + sha256: e2f4c0ac13f0898f670ce5ac0dc4501ebe09b96f9d59163724380d9aa82065be url: "https://pub.dev" source: hosted - version: "0.4.0" + version: "0.4.4" http: dependency: transitive description: name: http - sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" http_parser: dependency: transitive description: @@ -182,31 +230,31 @@ packages: path: "../langchain_core" relative: true source: path - version: "0.3.1" + version: "0.3.6" leak_tracker: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: "https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" matcher: dependency: transitive description: @@ -227,10 +275,10 @@ packages: dependency: "direct main" description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: transitive description: @@ -251,10 +299,10 @@ packages: dependency: transitive description: name: rxdart - sha256: "0c7c0cedd93788d996e33041ffecda924cc54389199cde4e6a34b440f50044cb" + sha256: "5c3004a4a8dbb94bd4bf5412a4def4acdaa12e12f269737a5751369e12d1a962" url: "https://pub.dev" source: hosted - version: "0.27.7" + version: "0.28.0" sky_engine: dependency: transitive description: flutter @@ -312,10 +360,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -328,10 +376,10 @@ packages: dependency: "direct main" description: name: uuid - sha256: "814e9e88f21a176ae1359149021870e87f7cddaf633ab678a5d2b0bff7fd1ba8" + sha256: "83d37c7ad7aaf9aa8e275490669535c8080377cfa7a7004c24dfac53afffaa90" url: "https://pub.dev" source: hosted - version: "4.4.0" + version: "4.4.2" vector_math: dependency: transitive description: @@ -344,18 +392,18 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: 
"13.0.0" + version: "14.2.1" web: dependency: transitive description: name: web - sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" + sha256: cd3543bd5798f6ad290ea73d210f423502e71900302dde696f8bff84bf89a1cb url: "https://pub.dev" source: hosted - version: "0.5.1" + version: "1.1.0" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.19.0" + dart: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" diff --git a/packages/langchain_firebase/pubspec.yaml b/packages/langchain_firebase/pubspec.yaml index 279bc37a..e34da117 100644 --- a/packages/langchain_firebase/pubspec.yaml +++ b/packages/langchain_firebase/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_firebase -description: LangChain.dart integration module for Firebase (VertexAI for Firebase, Firestore, etc.). -version: 0.1.0 +description: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.). +version: 0.2.1+2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,17 +14,19 @@ topics: - firebase environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.19.0" + sdk: ">=3.4.0 <4.0.0" + flutter: ">=3.22.0" dependencies: - collection: '>=1.17.0 <1.19.0' - firebase_app_check: ^0.2.2+5 - firebase_core: ^2.31.0 - firebase_vertexai: ^0.1.0 - langchain_core: ^0.3.1 + collection: ^1.18.0 + firebase_app_check: ^0.3.0 + firebase_auth: ^5.1.0 + firebase_core: ^3.3.0 + firebase_vertexai: ^0.2.2 + cloud_firestore: ^5.4.3 + langchain_core: 0.3.6 meta: ^1.11.0 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: flutter_test: diff --git a/packages/langchain_google/CHANGELOG.md b/packages/langchain_google/CHANGELOG.md index b61c71d8..36d0882e 100644 --- a/packages/langchain_google/CHANGELOG.md +++ b/packages/langchain_google/CHANGELOG.md @@ -1,3 +1,35 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.6.3+1 + + - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981)) + - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d)) + +## 0.6.2 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +## 0.6.1 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.6.0 + +> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default. + + - **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69)) + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + - **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). ([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273)) + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +## 0.5.1 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.5.0 > Note: `ChatGoogleGenerativeAI` and `GoogleGenerativeAIEmbeddings` now use the version `v1beta` of the Gemini API (instead of `v1`) which support the latest models (`gemini-1.5-pro-latest` and `gemini-1.5-flash-latest`). 
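The `responseMimeType` and `responseSchema` options referenced in [#461](https://github.com/davidmigloz/langchain_dart/issues/461) can be combined to request structured JSON output from `ChatGoogleGenerativeAI`. The following is only a minimal sketch (the API-key placeholder and the prompt are assumptions; the option names and the `gemini-1.5-flash` default model come from this changeset):

```dart
import 'package:langchain_core/chat_models.dart';
import 'package:langchain_core/prompts.dart';
import 'package:langchain_google/langchain_google.dart';

Future<void> main() async {
  // Placeholder API key (assumption); gemini-1.5-flash is the default model.
  final chatModel = ChatGoogleGenerativeAI(
    apiKey: 'GOOGLEAI_API_KEY',
    defaultOptions: const ChatGoogleGenerativeAIOptions(
      // Ask for JSON output that follows the schema below.
      responseMimeType: 'application/json',
      responseSchema: {
        'type': 'object',
        'properties': {
          'answer': {'type': 'string'},
          'sources': {
            'type': 'array',
            'items': {'type': 'string'},
          },
        },
        'required': ['answer', 'sources'],
      },
    ),
  );

  final res = await chatModel.invoke(
    PromptValue.chat([
      ChatMessage.humanText('What is LangChain.dart? Cite your sources.'),
    ]),
  );
  print(res.output.content); // JSON string matching the schema above
  chatModel.close();
}
```

Note that `responseSchema` follows the JSON Schema specification and only applies when `responseMimeType` is set to `application/json`.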
diff --git a/packages/langchain_google/example/firestore_memory.dart b/packages/langchain_google/example/firestore_memory.dart new file mode 100644 index 00000000..65f24c66 --- /dev/null +++ b/packages/langchain_google/example/firestore_memory.dart @@ -0,0 +1,87 @@ +import 'package:fake_cloud_firestore/fake_cloud_firestore.dart'; +import 'package:firebase_core/firebase_core.dart'; +import 'package:langchain_core/llms.dart'; +import 'package:langchain_firebase/langchain_firebase.dart'; +import 'package:langchain/langchain.dart'; +
+Future<void> main() async { + await _history(); + // await _historyWithNestedCollection(); + // await _chainWithFirestoreHistory(); +} +
+Future<void> _history() async { + final db = FakeFirebaseFirestore(); + const String userId = '1234'; + + // Initialize your Firebase app + await Firebase.initializeApp(name: db.app.name, options: db.app.options); + + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + collections: 'Langchain', + options: db.app.options, + userId: userId, + ); + + await history.addHumanChatMessage('hi!'); + await history.addAIChatMessage('whats up?'); + + print(await history.getChatMessages()); +} +
+// Nested Firestore collection +Future<void> _historyWithNestedCollection() async { + final db = FakeFirebaseFirestore(); + const String userId = '1234'; + + // Initialize your Firebase app + await Firebase.initializeApp(name: db.app.name, options: db.app.options); + + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + collections: 'Users/$userId/Messages', + options: db.app.options, + userId: userId, + ); + + await history.addHumanChatMessage('hi!'); + await history.addAIChatMessage('whats up?'); + + print(await history.getChatMessages()); + // [HumanChatMessage(content='hi!', example=false), + // AIMessage(content='whats up?', example=false)] +} +
+// Using it in a chain +Future<void> _chainWithFirestoreHistory() async { + final db = FakeFirebaseFirestore(); + const String userId = '1234'; + + // Initialize your Firebase app + await Firebase.initializeApp(name: db.app.name, options: db.app.options); + + // Create a Firestore history to give to ConversationBufferMemory + final FirestoreChatMessageHistory history = FirestoreChatMessageHistory( + collections: 'Users/$userId/Messages', + options: db.app.options, + userId: userId, + ); + + // Create the LLM instance + final llm = FakeLLM( + responses: [ + "Hi there! It's nice to meet you. How can I help you today?", + ], + ); + + // Create a chain with memory + final ConversationChain conversation = ConversationChain( + llm: llm, + memory: ConversationBufferMemory(chatHistory: history), + ); + + // Call the LLM + final output1 = await conversation.run('Hi there!'); + + print(output1); + // -> 'Hi there! It's nice to meet you. How can I help you today?' +}
diff --git a/packages/langchain_google/lib/langchain_google.dart b/packages/langchain_google/lib/langchain_google.dart index 371e45ad..a4dd4908 100644 --- a/packages/langchain_google/lib/langchain_google.dart +++ b/packages/langchain_google/lib/langchain_google.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). +/// LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.).
library; export 'src/chat_models/chat_models.dart'; diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart index 02fde0bb..5b41f34d 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart @@ -31,25 +31,26 @@ import 'types.dart'; /// ### Available models /// /// The following models are available: -/// - `gemini-1.0-pro` (or `gemini-pro`): -/// * text -> text model -/// * Max input token: 30720 -/// * Max output tokens: 2048 -/// - `gemini-pro-vision`: -/// * text / image -> text model -/// * Max input token: 12288 -/// * Max output tokens: 4096 -/// - `gemini-1.5-pro-latest`: text / image -> text model +/// - `gemini-1.5-flash`: /// * text / image / audio -> text model /// * Max input token: 1048576 /// * Max output tokens: 8192 -/// - `gemini-1.5-flash-latest`: +/// - `gemini-1.5-pro`: /// * text / image / audio -> text model -/// * Max input token: 1048576 +/// * Max input token: 2097152 +/// * Max output tokens: 8192 +/// - `gemini-1.0-pro` (or `gemini-pro`): +/// * text -> text model +/// * Max input token: 32760 /// * Max output tokens: 8192 +/// - `aqa`: +/// * text -> text model +/// * Max input token: 7168 +/// * Max output tokens: 1024 /// /// Mind that this list may not be up-to-date. -/// Refer to the [documentation](https://ai.google.dev/models) for the updated list. +/// Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) +/// for the updated list. /// /// #### Tuned models /// @@ -118,7 +119,7 @@ import 'types.dart'; /// /// [ChatGoogleGenerativeAI] supports tool calling. /// -/// Check the [docs](https://langchaindart.com/#/modules/model_io/models/chat_models/how_to/tools) +/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) /// for more information on how to use tools. /// /// Example: @@ -211,7 +212,7 @@ class ChatGoogleGenerativeAI final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatGoogleGenerativeAIOptions( - model: 'gemini-pro', + model: defaultModel, ), }) : _currentModel = defaultOptions.model ?? '', _httpClient = createDefaultHttpClient( @@ -247,15 +248,18 @@ class ChatGoogleGenerativeAI /// Get the API key. String get apiKey => _httpClient.headers['x-goog-api-key'] ?? ''; - @override - String get modelType => 'chat-google-generative-ai'; - /// The current model set in [_googleAiClient]; String _currentModel; /// The current system instruction set in [_googleAiClient]; String? _currentSystemInstruction; + @override + String get modelType => 'chat-google-generative-ai'; + + /// The default model to use unless another is specified. + static const defaultModel = 'gemini-1.5-flash'; + @override Future invoke( final PromptValue input, { @@ -322,6 +326,11 @@ class ChatGoogleGenerativeAI temperature: options?.temperature ?? defaultOptions.temperature, topP: options?.topP ?? defaultOptions.topP, topK: options?.topK ?? defaultOptions.topK, + responseMimeType: + options?.responseMimeType ?? defaultOptions.responseMimeType, + responseSchema: + (options?.responseSchema ?? defaultOptions.responseSchema) + ?.toSchema(), ), (options?.tools ?? defaultOptions.tools)?.toToolList(), (options?.toolChoice ?? 
defaultOptions.toolChoice)?.toToolConfig(), @@ -349,7 +358,7 @@ class ChatGoogleGenerativeAI return tokens.totalTokens; } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _httpClient.close(); } @@ -384,8 +393,7 @@ class ChatGoogleGenerativeAI final List messages, final ChatGoogleGenerativeAIOptions? options, ) { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? defaultModel; final systemInstruction = messages.firstOrNull is SystemChatMessage ? messages.firstOrNull?.contentAsString diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart index 8623a2c1..521921ac 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart @@ -1,7 +1,6 @@ // ignore_for_file: public_member_api_docs import 'dart:convert'; -import 'package:collection/collection.dart'; import 'package:google_generative_ai/google_generative_ai.dart' as g; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; @@ -96,7 +95,7 @@ extension GenerateContentResponseMapper on g.GenerateContentResponse { _ => throw AssertionError('Unknown part type: $p'), }, ) - .whereNotNull() + .nonNulls .join('\n'), toolCalls: candidate.content.parts .whereType() @@ -198,14 +197,17 @@ extension ChatToolListMapper on List { (tool) => g.FunctionDeclaration( tool.name, tool.description, - _mapJsonSchemaToSchema(tool.inputJsonSchema), + tool.inputJsonSchema.toSchema(), ), ).toList(growable: false), ), ]; } +} - g.Schema _mapJsonSchemaToSchema(final Map jsonSchema) { +extension SchemaMapper on Map { + g.Schema toSchema() { + final jsonSchema = this; final type = jsonSchema['type'] as String; final description = jsonSchema['description'] as String?; final nullable = jsonSchema['nullable'] as bool?; @@ -248,7 +250,7 @@ extension ChatToolListMapper on List { ); case 'array': if (items != null) { - final itemsSchema = _mapJsonSchemaToSchema(items); + final itemsSchema = items.toSchema(); return g.Schema.array( items: itemsSchema, description: description, @@ -259,7 +261,10 @@ extension ChatToolListMapper on List { case 'object': if (properties != null) { final propertiesSchema = properties.map( - (key, value) => MapEntry(key, _mapJsonSchemaToSchema(value)), + (key, value) => MapEntry( + key, + (value as Map).toSchema(), + ), ); return g.Schema.object( properties: propertiesSchema, @@ -288,6 +293,11 @@ extension ChatToolChoiceMapper on ChatToolChoice { mode: g.FunctionCallingMode.auto, ), ), + ChatToolChoiceRequired() => g.ToolConfig( + functionCallingConfig: g.FunctionCallingConfig( + mode: g.FunctionCallingMode.any, + ), + ), final ChatToolChoiceForced t => g.ToolConfig( functionCallingConfig: g.FunctionCallingConfig( mode: g.FunctionCallingMode.any, diff --git a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart index b3553cab..4c2f4063 100644 --- a/packages/langchain_google/lib/src/chat_models/google_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/google_ai/types.dart @@ -1,29 +1,31 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_google_generative_ai_options} /// Options to pass 
into the Google Generative AI Chat Model. +/// +/// You can find a list of available models [here](https://ai.google.dev/models). /// {@endtemplate} +@immutable class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// {@macro chat_google_generative_ai_options} const ChatGoogleGenerativeAIOptions({ - this.model = 'gemini-pro', + super.model, this.topP, this.topK, this.candidateCount, this.maxOutputTokens, this.temperature, this.stopSequences, + this.responseMimeType, + this.responseSchema, this.safetySettings, super.tools, super.toolChoice, super.concurrencyLimit, }); - /// The LLM to use. - /// - /// You can find a list of available models here: https://ai.google.dev/models - final String? model; - /// The maximum cumulative probability of tokens to consider when sampling. /// The model uses combined Top-k and nucleus sampling. Tokens are sorted /// based on their assigned probabilities so that only the most likely @@ -68,6 +70,39 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// The stop sequence will not be included as part of the response. final List? stopSequences; + /// Output response mimetype of the generated candidate text. + /// + /// Supported mimetype: + /// - `text/plain`: (default) Text output. + /// - `application/json`: JSON response in the candidates. + final String? responseMimeType; + + /// Output response schema of the generated candidate text. + /// Following the [JSON Schema specification](https://json-schema.org). + /// + /// - Note: This only applies when the specified ``responseMIMEType`` supports + /// a schema; currently this is limited to `application/json`. + /// + /// Example: + /// ```json + /// { + /// 'type': 'object', + /// 'properties': { + /// 'answer': { + /// 'type': 'string', + /// 'description': 'The answer to the question being asked', + /// }, + /// 'sources': { + /// 'type': 'array', + /// 'items': {'type': 'string'}, + /// 'description': 'The sources used to answer the question', + /// }, + /// }, + /// 'required': ['answer', 'sources'], + /// }, + /// ``` + final Map? responseSchema; + /// A list of unique [ChatGoogleGenerativeAISafetySetting] instances for blocking /// unsafe content. /// @@ -80,8 +115,7 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { /// the default safety setting for that category. final List? safetySettings; - /// Creates a copy of this [ChatGoogleGenerativeAIOptions] object with the given fields - /// replaced with the new values. + @override ChatGoogleGenerativeAIOptions copyWith({ final String? model, final double? topP, @@ -91,6 +125,9 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { final double? temperature, final List? stopSequences, final List? safetySettings, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatGoogleGenerativeAIOptions( model: model ?? this.model, @@ -101,8 +138,60 @@ class ChatGoogleGenerativeAIOptions extends ChatModelOptions { temperature: temperature ?? this.temperature, stopSequences: stopSequences ?? this.stopSequences, safetySettings: safetySettings ?? this.safetySettings, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + ChatGoogleGenerativeAIOptions merge( + covariant final ChatGoogleGenerativeAIOptions? 
other, + ) { + return copyWith( + model: other?.model, + topP: other?.topP, + topK: other?.topK, + candidateCount: other?.candidateCount, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + stopSequences: other?.stopSequences, + safetySettings: other?.safetySettings, + tools: other?.tools, + toolChoice: other?.toolChoice, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) { + return model == other.model && + topP == other.topP && + topK == other.topK && + candidateCount == other.candidateCount && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + stopSequences == other.stopSequences && + safetySettings == other.safetySettings && + tools == other.tools && + toolChoice == other.toolChoice && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + candidateCount.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + stopSequences.hashCode ^ + safetySettings.hashCode ^ + tools.hashCode ^ + toolChoice.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_google_generative_ai_safety_setting} diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart index 4f668b40..e79f00b4 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart @@ -117,8 +117,8 @@ class ChatVertexAI extends BaseChatModel { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const ChatVertexAIOptions( - publisher: 'google', - model: 'chat-bison', + publisher: defaultPublisher, + model: defaultModel, ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -139,6 +139,12 @@ class ChatVertexAI extends BaseChatModel { @override String get modelType => 'vertex-ai-chat'; + /// The default publisher to use unless another is specified. + static const defaultPublisher = 'google'; + + /// The default model to use unless another is specified. + static const defaultModel = 'chat-bison'; + @override Future invoke( final PromptValue input, { @@ -158,19 +164,15 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final result = await client.chat.predict( context: context, examples: examples, messages: vertexMessages, - publisher: options?.publisher ?? - defaultOptions.publisher ?? - ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, parameters: VertexAITextChatModelRequestParams( maxOutputTokens: @@ -216,18 +218,15 @@ class ChatVertexAI extends BaseChatModel { final examples = (options?.examples ?? defaultOptions.examples) ?.map((final e) => e.toVertexAIChatExample()) .toList(growable: false); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? 
defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final res = await client.chat.countTokens( context: context, examples: examples, messages: vertexMessages, - publisher: options?.publisher ?? - ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart index 49316c4e..50249bf3 100644 --- a/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart @@ -1,13 +1,20 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_vertex_ai_options} /// Options to pass into the Vertex AI Chat Model. +/// +/// You can find a list of available models here: +/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} +@immutable class ChatVertexAIOptions extends ChatModelOptions { /// {@macro chat_vertex_ai_options} const ChatVertexAIOptions({ - this.publisher = 'google', - this.model = 'chat-bison', + this.publisher, + super.model, this.maxOutputTokens, this.temperature, this.topP, @@ -23,17 +30,6 @@ class ChatVertexAIOptions extends ChatModelOptions { /// Use `google` for first-party models. final String? publisher; - /// The text model to use. - /// - /// To use the latest model version, specify the model name without a version - /// number (e.g. `chat-bison`). - /// To use a stable model version, specify the model version number - /// (e.g. `chat-bison@001`). - /// - /// You can find a list of available models here: - /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models - final String? model; - /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -102,8 +98,7 @@ class ChatVertexAIOptions extends ChatModelOptions { /// List of messages to the model to learn how to respond to the conversation. final List? examples; - /// Creates a copy of this [ChatVertexAIOptions] object with the given fields - /// replaced with the new values. + @override ChatVertexAIOptions copyWith({ final String? publisher, final String? model, @@ -114,6 +109,9 @@ class ChatVertexAIOptions extends ChatModelOptions { final List? stopSequences, final int? candidateCount, final List? examples, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatVertexAIOptions( publisher: publisher ?? this.publisher, @@ -125,6 +123,52 @@ class ChatVertexAIOptions extends ChatModelOptions { stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, examples: examples ?? this.examples, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + ChatVertexAIOptions merge(covariant ChatVertexAIOptions? 
other) { + return copyWith( + publisher: other?.publisher, + model: other?.model, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + topP: other?.topP, + topK: other?.topK, + stopSequences: other?.stopSequences, + candidateCount: other?.candidateCount, + examples: other?.examples, + concurrencyLimit: other?.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatVertexAIOptions other) { + return publisher == other.publisher && + model == other.model && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + topP == other.topP && + topK == other.topK && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + candidateCount == other.candidateCount && + const ListEquality().equals(examples, other.examples) && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return publisher.hashCode ^ + model.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + const ListEquality().hash(stopSequences) ^ + candidateCount.hashCode ^ + const ListEquality().hash(examples) ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart index b5996abd..263a2c44 100644 --- a/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart +++ b/packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart @@ -1,4 +1,3 @@ -import 'package:collection/collection.dart' show IterableNullableExtension; import 'package:google_generative_ai/google_generative_ai.dart' show Content, EmbedContentRequest, GenerativeModel, TaskType; import 'package:http/http.dart' as http; @@ -25,8 +24,6 @@ import '../../utils/https_client/http_client.dart'; /// /// - `text-embedding-004` /// * Dimensions: 768 (with support for reduced dimensionality) -/// - `embedding-001` -/// * Dimensions: 768 /// /// The previous list of models may not be exhaustive or up-to-date. Check out /// the [Google AI documentation](https://ai.google.dev/models/gemini) @@ -139,7 +136,6 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { /// The number of dimensions the resulting output embeddings should have. /// Only supported in `text-embedding-004` and later models. - /// TODO https://github.com/google-gemini/generative-ai-dart/pull/149 int? dimensions; /// The maximum number of documents to embed in a single request. 
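With `outputDimensionality` now passed through to the Gemini API (the TODO removed in the hunk above, per [#544](https://github.com/davidmigloz/langchain_dart/issues/544)), `GoogleGenerativeAIEmbeddings.dimensions` can shorten the returned embeddings. A minimal sketch mirroring the updated test, with an assumed API-key placeholder:

```dart
import 'package:langchain_google/langchain_google.dart';

Future<void> main() async {
  // Placeholder API key (assumption). text-embedding-004 returns 768
  // dimensions by default; here the output is truncated to 256.
  final embeddings = GoogleGenerativeAIEmbeddings(
    apiKey: 'GOOGLEAI_API_KEY',
  )..dimensions = 256;

  final vector = await embeddings.embedQuery('Hello world');
  print(vector.length); // 256
}
```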
@@ -169,13 +165,13 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { Content.text(doc.pageContent), taskType: TaskType.retrievalDocument, title: doc.metadata[docTitleKey], - // outputDimensionality: dimensions, TODO + outputDimensionality: dimensions, ); }).toList(growable: false), ); return data.embeddings .map((final p) => p.values) - .whereNotNull() + .nonNulls .toList(growable: false); }), ); @@ -188,7 +184,7 @@ class GoogleGenerativeAIEmbeddings implements Embeddings { final data = await _googleAiClient.embedContent( Content.text(query), taskType: TaskType.retrievalQuery, - // outputDimensionality: dimensions, TODO + outputDimensionality: dimensions, ); return data.embedding.values; } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart index bf382c44..e11589c4 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/types.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/types.dart @@ -1,13 +1,19 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; +import 'package:meta/meta.dart'; /// {@template vertex_ai_options} /// Options to pass into the Vertex AI LLM. +/// +/// You can find a list of available models here: +/// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models /// {@endtemplate} +@immutable class VertexAIOptions extends LLMOptions { /// {@macro vertex_ai_options} const VertexAIOptions({ - this.publisher = 'google', - this.model = 'text-bison', + this.publisher, + super.model, this.maxOutputTokens, this.temperature, this.topP, @@ -22,17 +28,6 @@ class VertexAIOptions extends LLMOptions { /// Use `google` for first-party models. final String? publisher; - /// The text model to use. - /// - /// To use the latest model version, specify the model name without a version - /// number (e.g. `text-bison`). - /// To use a stable model version, specify the model version number - /// (e.g. `text-bison@001`). - /// - /// You can find a list of available models here: - /// https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models - final String? model; - /// Maximum number of tokens that can be generated in the response. A token /// is approximately four characters. 100 tokens correspond to roughly /// 60-80 words. @@ -98,8 +93,7 @@ class VertexAIOptions extends LLMOptions { /// Range: `[1–8]` final int? candidateCount; - /// Creates a copy of this [VertexAIOptions] object with the given fields - /// replaced with the new values. + @override VertexAIOptions copyWith({ final String? publisher, final String? model, @@ -109,6 +103,7 @@ class VertexAIOptions extends LLMOptions { final int? topK, final List? stopSequences, final int? candidateCount, + final int? concurrencyLimit, }) { return VertexAIOptions( publisher: publisher ?? this.publisher, @@ -119,6 +114,49 @@ class VertexAIOptions extends LLMOptions { topK: topK ?? this.topK, stopSequences: stopSequences ?? this.stopSequences, candidateCount: candidateCount ?? this.candidateCount, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + VertexAIOptions merge(covariant final VertexAIOptions? 
other) { + return copyWith( + publisher: other?.publisher, + model: other?.model, + maxOutputTokens: other?.maxOutputTokens, + temperature: other?.temperature, + topP: other?.topP, + topK: other?.topK, + stopSequences: other?.stopSequences, + candidateCount: other?.candidateCount, + concurrencyLimit: other?.concurrencyLimit, ); } + + @override + bool operator ==(covariant final VertexAIOptions other) { + return publisher == other.publisher && + model == other.model && + maxOutputTokens == other.maxOutputTokens && + temperature == other.temperature && + topP == other.topP && + topK == other.topK && + const ListEquality() + .equals(stopSequences, other.stopSequences) && + candidateCount == other.candidateCount && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return publisher.hashCode ^ + model.hashCode ^ + maxOutputTokens.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + topK.hashCode ^ + const ListEquality().hash(stopSequences) ^ + candidateCount.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart index a0873fcc..955cc7ca 100644 --- a/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart +++ b/packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart @@ -123,8 +123,8 @@ class VertexAI extends BaseLLM { final String location = 'us-central1', final String? rootUrl, super.defaultOptions = const VertexAIOptions( - publisher: 'google', - model: 'text-bison', + publisher: defaultPublisher, + model: defaultModel, ), }) : client = VertexAIGenAIClient( httpClient: httpClient, @@ -145,21 +145,24 @@ class VertexAI extends BaseLLM { @override String get modelType => 'vertex-ai'; + /// The default publisher to use unless another is specified. + static const defaultPublisher = 'google'; + + /// The default model to use unless another is specified. + static const defaultModel = 'text-bison'; + @override Future invoke( final PromptValue input, { final VertexAIOptions? options, }) async { final id = _uuid.v4(); - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final result = await client.text.predict( prompt: input.toString(), - publisher: options?.publisher ?? - ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, parameters: VertexAITextModelRequestParams( maxOutputTokens: @@ -191,15 +194,12 @@ class VertexAI extends BaseLLM { final PromptValue promptValue, { final VertexAIOptions? options, }) async { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final publisher = + options?.publisher ?? defaultOptions.publisher ?? defaultPublisher; + final model = options?.model ?? defaultOptions.model ?? defaultModel; final res = await client.text.countTokens( prompt: promptValue.toString(), - publisher: options?.publisher ?? 
- ArgumentError.checkNotNull( - defaultOptions.publisher, - 'VertexAIOptions.publisher', - ), + publisher: publisher, model: model, ); return res.totalTokens; diff --git a/packages/langchain_google/lib/src/utils/https_client/http_client.dart b/packages/langchain_google/lib/src/utils/https_client/http_client.dart index 479d2164..6b9ed76c 100644 --- a/packages/langchain_google/lib/src/utils/https_client/http_client.dart +++ b/packages/langchain_google/lib/src/utils/https_client/http_client.dart @@ -2,8 +2,7 @@ import 'package:http/http.dart' as http; export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; /// {@template custom_http_client} /// Custom HTTP client that wraps the base HTTP client and allows to override diff --git a/packages/langchain_google/pubspec.yaml b/packages/langchain_google/pubspec.yaml index da3c7388..810dbf62 100644 --- a/packages/langchain_google/pubspec.yaml +++ b/packages/langchain_google/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_google -description: LangChain.dart integration module for Google (VertexAI, PaLM, Matching Engine, etc.). -version: 0.5.0 +description: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.). +version: 0.6.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,20 +14,23 @@ topics: - vertex-ai environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' - fetch_client: ^1.0.2 - gcloud: ^0.8.12 - google_generative_ai: 0.4.0 - googleapis: ^12.0.0 - googleapis_auth: ^1.5.1 - http: ^1.1.0 - langchain_core: ^0.3.1 + collection: ^1.18.0 + fetch_client: ^1.1.2 + gcloud: ^0.8.13 + google_generative_ai: 0.4.4 + googleapis: ^13.0.0 + googleapis_auth: ^1.6.0 + http: ^1.2.2 + langchain_core: 0.3.6 meta: ^1.11.0 - uuid: ^4.3.3 - vertex_ai: ^0.1.0 + uuid: ^4.4.2 + vertex_ai: ^0.1.0+2 + firebase_core: ^3.6.0 dev_dependencies: test: ^1.25.2 + langchain: ^0.7.6 + fake_cloud_firestore: ^3.0.3 diff --git a/packages/langchain_google/pubspec_overrides.yaml b/packages/langchain_google/pubspec_overrides.yaml index 50319fbe..9844d8a9 100644 --- a/packages/langchain_google/pubspec_overrides.yaml +++ b/packages/langchain_google/pubspec_overrides.yaml @@ -1,6 +1,8 @@ -# melos_managed_dependency_overrides: vertex_ai,langchain_core +# melos_managed_dependency_overrides: langchain_core,vertex_ai dependency_overrides: langchain_core: path: ../langchain_core vertex_ai: path: ../vertex_ai + langchain_firebase: + path: ../langchain_firebase diff --git a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart index f6567f6d..6d692977 100644 --- a/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart +++ b/packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatGoogleGenerativeAI tests', () { - const defaultModel = 'gemini-1.5-pro-latest'; + const defaultModel = 
'gemini-1.5-pro'; late ChatGoogleGenerativeAI chatModel; @@ -73,7 +73,7 @@ void main() { expect(res.output.content, isNotEmpty); }); - test('Text-and-image input with gemini-pro-vision', () async { + test('Text-and-image input', () async { final res = await chatModel.invoke( PromptValue.chat([ ChatMessage.human( @@ -89,9 +89,6 @@ void main() { ]), ), ]), - options: const ChatGoogleGenerativeAIOptions( - model: 'gemini-pro-vision', - ), ); expect(res.output.content.toLowerCase(), contains('apple')); @@ -122,7 +119,8 @@ void main() { ), ); expect(res.output.content.length, lessThan(20)); - expect(res.finishReason, FinishReason.length); + // It seems the gemini-1.5 doesn't return length reason anymore + // expect(res.finishReason, FinishReason.length); }); test('Test Multi-turn conversations with gemini-pro', () async { @@ -177,7 +175,7 @@ void main() { 'properties': { 'location': { 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', + 'description': 'The city and country, e.g. San Francisco, US', }, 'unit': { 'type': 'string', @@ -196,7 +194,7 @@ void main() { ); final humanMessage = ChatMessage.humanText( - 'What’s the weather like in Boston and Madrid right now in celsius?', + 'What’s the weather like in Boston, US and Madrid, Spain in Celsius?', ); final res1 = await model.invoke(PromptValue.chat([humanMessage])); diff --git a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart index bc942e51..a2f88906 100644 --- a/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart +++ b/packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart @@ -49,8 +49,7 @@ void main() { expect(res[1].length, 768); }); - // TODO https://github.com/google-gemini/generative-ai-dart/pull/149 - test('Test shortening embeddings', skip: true, () async { + test('Test shortening embeddings', () async { embeddings.dimensions = 256; final res = await embeddings.embedQuery('Hello world'); expect(res.length, 256); diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml index 576a8f6f..2f29e62b 100644 --- a/packages/langchain_huggingface/pubspec.yaml +++ b/packages/langchain_huggingface/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_huggingface issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_huggingface homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_microsoft/pubspec.yaml b/packages/langchain_microsoft/pubspec.yaml index 685287b7..11d0021c 100644 --- a/packages/langchain_microsoft/pubspec.yaml +++ b/packages/langchain_microsoft/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_microsoft issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_microsoft homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be 
published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_mistralai/CHANGELOG.md b/packages/langchain_mistralai/CHANGELOG.md index c87fd2db..99b6c0e2 100644 --- a/packages/langchain_mistralai/CHANGELOG.md +++ b/packages/langchain_mistralai/CHANGELOG.md @@ -1,3 +1,29 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.3+1 + + - Update a dependency to the latest release. + +## 0.2.3 + + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +## 0.2.2 + + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.2.1 + + - Update a dependency to the latest release. + +## 0.2.1 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + ## 0.2.0+1 - Update a dependency to the latest release. diff --git a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart index ae0877a0..70f6bd4b 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart @@ -156,7 +156,7 @@ class ChatMistralAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatMistralAIOptions( - model: 'mistral-small', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = MistralAIClient( @@ -179,6 +179,9 @@ class ChatMistralAI extends BaseChatModel { @override String get modelType => 'chat-mistralai'; + /// The default model to use unless another is specified. + static const defaultModel = 'mistral-small'; + @override Future invoke( final PromptValue input, { @@ -216,7 +219,7 @@ class ChatMistralAI extends BaseChatModel { }) { return ChatCompletionRequest( model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), messages: messages.toChatCompletionMessages(), temperature: options?.temperature ?? 
defaultOptions.temperature, @@ -248,7 +251,7 @@ class ChatMistralAI extends BaseChatModel { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_mistralai/lib/src/chat_models/types.dart b/packages/langchain_mistralai/lib/src/chat_models/types.dart index 60158ea7..d9a75761 100644 --- a/packages/langchain_mistralai/lib/src/chat_models/types.dart +++ b/packages/langchain_mistralai/lib/src/chat_models/types.dart @@ -1,12 +1,17 @@ import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_mistral_ai_options} /// Options to pass into ChatMistralAI. +/// +/// You can check the list of available models [here](https://docs.mistral.ai/models). /// {@endtemplate} +@immutable class ChatMistralAIOptions extends ChatModelOptions { /// {@macro chat_mistral_ai_options} const ChatMistralAIOptions({ - this.model = 'mistral-small', + super.model, this.temperature, this.topP, this.maxTokens, @@ -15,11 +20,6 @@ class ChatMistralAIOptions extends ChatModelOptions { super.concurrencyLimit, }); - /// ID of the model to use. You can use the [List Available Models](https://docs.mistral.ai/api#operation/listModels) - /// API to see all of your available models, or see our [Model overview](https://docs.mistral.ai/models) - /// for model descriptions. - final String? model; - /// What sampling temperature to use, between 0.0 and 2.0. Higher values like /// 0.8 will make the output more random, while lower values like 0.2 will /// make it more focused and deterministic. @@ -47,8 +47,7 @@ class ChatMistralAIOptions extends ChatModelOptions { /// If set, different calls will generate deterministic results. final int? randomSeed; - /// Creates a copy of this [ChatMistralAIOptions] object with the given fields - /// replaced with the new values. + @override ChatMistralAIOptions copyWith({ final String? model, final double? temperature, @@ -56,6 +55,9 @@ class ChatMistralAIOptions extends ChatModelOptions { final int? maxTokens, final bool? safePrompt, final int? randomSeed, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatMistralAIOptions( model: model ?? this.model, @@ -64,6 +66,42 @@ class ChatMistralAIOptions extends ChatModelOptions { maxTokens: maxTokens ?? this.maxTokens, safePrompt: safePrompt ?? this.safePrompt, randomSeed: randomSeed ?? this.randomSeed, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, + ); + } + + @override + ChatMistralAIOptions merge(covariant ChatMistralAIOptions? 
other) { + return copyWith( + model: other?.model, + temperature: other?.temperature, + topP: other?.topP, + maxTokens: other?.maxTokens, + safePrompt: other?.safePrompt, + randomSeed: other?.randomSeed, + concurrencyLimit: other?.concurrencyLimit, ); } + + @override + bool operator ==(covariant final ChatMistralAIOptions other) { + return model == other.model && + temperature == other.temperature && + topP == other.topP && + maxTokens == other.maxTokens && + safePrompt == other.safePrompt && + randomSeed == other.randomSeed && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + maxTokens.hashCode ^ + safePrompt.hashCode ^ + randomSeed.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_mistralai/pubspec.yaml b/packages/langchain_mistralai/pubspec.yaml index 0059b2db..4b29f8a0 100644 --- a/packages/langchain_mistralai/pubspec.yaml +++ b/packages/langchain_mistralai/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_mistralai description: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.2.0+1 +version: 0.2.3+1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,15 +14,15 @@ topics: - mistral environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' - http: ^1.1.0 - langchain_core: ^0.3.1 + collection: ^1.18.0 + http: ^1.2.2 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - mistralai_dart: ^0.0.3+1 + mistralai_dart: ^0.0.3+3 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_mistralai/pubspec_overrides.yaml b/packages/langchain_mistralai/pubspec_overrides.yaml index 4a44a89b..0bb3e94e 100644 --- a/packages/langchain_mistralai/pubspec_overrides.yaml +++ b/packages/langchain_mistralai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: mistralai_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,mistralai_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_ollama/CHANGELOG.md b/packages/langchain_ollama/CHANGELOG.md index 6f38a23b..f83459af 100644 --- a/packages/langchain_ollama/CHANGELOG.md +++ b/packages/langchain_ollama/CHANGELOG.md @@ -1,3 +1,37 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.3.2 + + - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +## 0.3.1 + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). 
([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7)) + - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0)) + +## 0.3.0 + + - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa)) + - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f)) + +## 0.2.2+1 + + - **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302)) + +## 0.2.2 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + +## 0.2.1+1 + + - Update a dependency to the latest release. + ## 0.2.1 - **FEAT**: Handle finish reason in ChatOllama ([#416](https://github.com/davidmigloz/langchain_dart/issues/416)). 
([a5e1af13](https://github.com/davidmigloz/langchain_dart/commit/a5e1af13ef4d2db690ab599dbf5e42f28659a059)) diff --git a/packages/langchain_ollama/README.md b/packages/langchain_ollama/README.md index e6d6d884..885dbf9f 100644 --- a/packages/langchain_ollama/README.md +++ b/packages/langchain_ollama/README.md @@ -2,7 +2,7 @@ [![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) [![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment) -[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollam.svg)](https://pub.dev/packages/langchain_ollama) +[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama) [![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) [![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) @@ -13,7 +13,7 @@ - LLMs: * `Ollama`: wrapper around Ollama Completions API. - Chat models: - * `ChatOllama`: wrapper around Ollama Completions API in a chat-like fashion. + * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion. - Embeddings: * `OllamaEmbeddings`: wrapper around Ollama Embeddings API. diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart index 731f4e59..0232e939 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_models.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_models.dart @@ -1,2 +1,2 @@ -export 'chat_ollama.dart'; -export 'types.dart'; +export 'chat_ollama/chat_ollama.dart'; +export 'chat_ollama/types.dart'; diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart similarity index 70% rename from packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart index a62962e4..190170d6 100644 --- a/packages/langchain_ollama/lib/src/chat_models/chat_ollama.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart @@ -5,15 +5,14 @@ import 'package:langchain_tiktoken/langchain_tiktoken.dart'; import 'package:ollama_dart/ollama_dart.dart'; import 'package:uuid/uuid.dart'; -import '../llms/mappers.dart'; import 'mappers.dart'; import 'types.dart'; -/// Wrapper around [Ollama](https://ollama.ai) Completions API that enables +/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables /// to interact with the LLMs in a chat-like fashion. /// /// Ollama allows you to run open-source large language models, -/// such as Llama 3 or LLaVA, locally. +/// such as Llama 3.2, Gemma 2 or LLaVA, locally. /// /// For a complete list of supported models and model variants, see the /// [Ollama model library](https://ollama.ai/library). @@ -35,7 +34,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. 
Fetch a model via `ollama pull ` -/// * e.g., for Llama 3: `ollama pull llama3` +/// * e.g., for Llama 3: `ollama pull llama3.2` /// /// ### Ollama base URL /// @@ -56,7 +55,7 @@ import 'types.dart'; /// ```dart /// final chatModel = ChatOllama( /// defaultOptions: const ChatOllamaOptions( -/// model: 'llama3', +/// model: 'llama3.2', /// temperature: 0, /// format: 'json', /// ), @@ -88,7 +87,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3')) | outputParser, +/// 'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.2')) | outputParser, /// 'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -151,7 +150,7 @@ class ChatOllama extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOllamaOptions( - model: 'llama3', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -176,6 +175,9 @@ class ChatOllama extends BaseChatModel { @override String get modelType => 'chat-ollama'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3.2'; + @override Future invoke( final PromptValue input, { @@ -183,9 +185,10 @@ class ChatOllama extends BaseChatModel { }) async { final id = _uuid.v4(); final completion = await _client.generateChatCompletion( - request: _generateCompletionRequest( + request: generateChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, ), ); return completion.toChatResult(id); @@ -199,9 +202,11 @@ class ChatOllama extends BaseChatModel { final id = _uuid.v4(); return _client .generateChatCompletionStream( - request: _generateCompletionRequest( + request: generateChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, + stream: true, ), ) .map( @@ -209,55 +214,6 @@ class ChatOllama extends BaseChatModel { ); } - /// Creates a [GenerateChatCompletionRequest] from the given input. - GenerateChatCompletionRequest _generateCompletionRequest( - final List messages, { - final bool stream = false, - final ChatOllamaOptions? options, - }) { - return GenerateChatCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), - messages: messages.toMessages(), - format: options?.format?.toResponseFormat(), - keepAlive: options?.keepAlive, - stream: stream, - options: RequestOptions( - numKeep: options?.numKeep ?? defaultOptions.numKeep, - seed: options?.seed ?? defaultOptions.seed, - numPredict: options?.numPredict ?? defaultOptions.numPredict, - topK: options?.topK ?? defaultOptions.topK, - topP: options?.topP ?? defaultOptions.topP, - tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, - typicalP: options?.typicalP ?? defaultOptions.typicalP, - repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, - temperature: options?.temperature ?? defaultOptions.temperature, - repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - mirostat: options?.mirostat ?? defaultOptions.mirostat, - mirostatTau: options?.mirostatTau ?? 
defaultOptions.mirostatTau, - mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, - penalizeNewline: - options?.penalizeNewline ?? defaultOptions.penalizeNewline, - stop: options?.stop ?? defaultOptions.stop, - numa: options?.numa ?? defaultOptions.numa, - numCtx: options?.numCtx ?? defaultOptions.numCtx, - numBatch: options?.numBatch ?? defaultOptions.numBatch, - numGpu: options?.numGpu ?? defaultOptions.numGpu, - mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, - lowVram: options?.lowVram ?? defaultOptions.lowVram, - f16Kv: options?.f16KV ?? defaultOptions.f16KV, - logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, - vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, - useMmap: options?.useMmap ?? defaultOptions.useMmap, - useMlock: options?.useMlock ?? defaultOptions.useMlock, - numThread: options?.numThread ?? defaultOptions.numThread, - ), - ); - } - /// Tokenizes the given prompt using tiktoken. /// /// Currently Ollama does not provide a tokenizer for the models it supports. @@ -278,7 +234,7 @@ class ChatOllama extends BaseChatModel { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart new file mode 100644 index 00000000..ce12e70f --- /dev/null +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart @@ -0,0 +1,267 @@ +// ignore_for_file: public_member_api_docs +import 'dart:convert'; + +import 'package:collection/collection.dart'; +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/language_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:ollama_dart/ollama_dart.dart' as o; +import 'package:uuid/uuid.dart'; + +import '../../llms/mappers.dart' show OllamaResponseFormatMapper; +import 'chat_ollama.dart'; +import 'types.dart'; + +/// Creates a [GenerateChatCompletionRequest] from the given input. +o.GenerateChatCompletionRequest generateChatCompletionRequest( + final List messages, { + required final ChatOllamaOptions? options, + required final ChatOllamaOptions defaultOptions, + final bool stream = false, +}) { + return o.GenerateChatCompletionRequest( + model: options?.model ?? defaultOptions.model ?? ChatOllama.defaultModel, + messages: messages.toMessages(), + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, + tools: _mapTools( + tools: options?.tools ?? defaultOptions.tools, + toolChoice: options?.toolChoice ?? defaultOptions.toolChoice, + ), + stream: stream, + options: o.RequestOptions( + numKeep: options?.numKeep ?? defaultOptions.numKeep, + seed: options?.seed ?? defaultOptions.seed, + numPredict: options?.numPredict ?? defaultOptions.numPredict, + topK: options?.topK ?? defaultOptions.topK, + topP: options?.topP ?? defaultOptions.topP, + minP: options?.minP ?? defaultOptions.minP, + tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, + typicalP: options?.typicalP ?? defaultOptions.typicalP, + repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, + temperature: options?.temperature ?? defaultOptions.temperature, + repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty, + presencePenalty: + options?.presencePenalty ?? 
defaultOptions.presencePenalty, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + mirostat: options?.mirostat ?? defaultOptions.mirostat, + mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau, + mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta, + penalizeNewline: + options?.penalizeNewline ?? defaultOptions.penalizeNewline, + stop: options?.stop ?? defaultOptions.stop, + numa: options?.numa ?? defaultOptions.numa, + numCtx: options?.numCtx ?? defaultOptions.numCtx, + numBatch: options?.numBatch ?? defaultOptions.numBatch, + numGpu: options?.numGpu ?? defaultOptions.numGpu, + mainGpu: options?.mainGpu ?? defaultOptions.mainGpu, + lowVram: options?.lowVram ?? defaultOptions.lowVram, + f16Kv: options?.f16KV ?? defaultOptions.f16KV, + logitsAll: options?.logitsAll ?? defaultOptions.logitsAll, + vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly, + useMmap: options?.useMmap ?? defaultOptions.useMmap, + useMlock: options?.useMlock ?? defaultOptions.useMlock, + numThread: options?.numThread ?? defaultOptions.numThread, + ), + ); +} + +List? _mapTools({ + final List? tools, + final ChatToolChoice? toolChoice, +}) { + if (tools == null || tools.isEmpty) { + return null; + } + + return switch (toolChoice) { + ChatToolChoiceNone() => null, + ChatToolChoiceAuto() || + ChatToolChoiceRequired() || + null => + tools.map(_mapTool).toList(growable: false), + final ChatToolChoiceForced f => [ + _mapTool(tools.firstWhere((t) => t.name == f.name)), + ] + }; +} + +o.Tool _mapTool(final ToolSpec tool) { + return o.Tool( + function: o.ToolFunction( + name: tool.name, + description: tool.description, + parameters: tool.inputJsonSchema, + ), + ); +} + +extension OllamaChatMessagesMapper on List { + List toMessages() { + return map(_mapMessage).expand((final msg) => msg).toList(growable: false); + } + + List _mapMessage(final ChatMessage msg) { + return switch (msg) { + final SystemChatMessage msg => [ + o.Message( + role: o.MessageRole.system, + content: msg.content, + ), + ], + final HumanChatMessage msg => _mapHumanMessage(msg), + final AIChatMessage msg => _mapAIMessage(msg), + final ToolChatMessage msg => [ + o.Message( + role: o.MessageRole.tool, + content: msg.content, + ), + ], + CustomChatMessage() => + throw UnsupportedError('Ollama does not support custom messages'), + }; + } + + List _mapHumanMessage(final HumanChatMessage message) { + return switch (message.content) { + final ChatMessageContentText c => [ + o.Message( + role: o.MessageRole.user, + content: c.text, + ), + ], + final ChatMessageContentImage c => [ + o.Message( + role: o.MessageRole.user, + content: c.data, + ), + ], + final ChatMessageContentMultiModal c => _mapContentMultiModal(c), + }; + } + + List _mapContentMultiModal( + final ChatMessageContentMultiModal content, + ) { + final parts = content.parts.groupListsBy((final p) => p.runtimeType); + + if ((parts[ChatMessageContentMultiModal]?.length ?? 0) > 0) { + throw UnsupportedError( + 'Cannot have multimodal content in multimodal content', + ); + } + + // If there's only one text part and the rest are images, then we combine them in one message + if ((parts[ChatMessageContentText]?.length ?? 
0) == 1) { + return [ + o.Message( + role: o.MessageRole.user, + content: + (parts[ChatMessageContentText]!.first as ChatMessageContentText) + .text, + images: parts[ChatMessageContentImage] + ?.map((final p) => (p as ChatMessageContentImage).data) + .toList(growable: false), + ), + ]; + } + + // Otherwise, we return the parts as separate messages + return content.parts + .map( + (final p) => switch (p) { + final ChatMessageContentText c => o.Message( + role: o.MessageRole.user, + content: c.text, + ), + final ChatMessageContentImage c => o.Message( + role: o.MessageRole.user, + content: c.data, + ), + ChatMessageContentMultiModal() => throw UnsupportedError( + 'Cannot have multimodal content in multimodal content', + ), + }, + ) + .toList(growable: false); + } + + List _mapAIMessage(final AIChatMessage message) { + return [ + o.Message( + role: o.MessageRole.assistant, + content: message.content, + toolCalls: message.toolCalls.isNotEmpty + ? message.toolCalls.map(_mapToolCall).toList(growable: false) + : null, + ), + ]; + } + + o.ToolCall _mapToolCall(final AIChatMessageToolCall toolCall) { + return o.ToolCall( + function: o.ToolCallFunction( + name: toolCall.name, + arguments: toolCall.arguments, + ), + ); + } +} + +extension ChatResultMapper on o.GenerateChatCompletionResponse { + ChatResult toChatResult(final String id, {final bool streaming = false}) { + return ChatResult( + id: id, + output: AIChatMessage( + content: message.content, + toolCalls: + message.toolCalls?.map(_mapToolCall).toList(growable: false) ?? + const [], + ), + finishReason: _mapFinishReason(doneReason), + metadata: { + 'model': model, + 'created_at': createdAt, + 'done': done, + 'total_duration': totalDuration, + 'load_duration': loadDuration, + 'prompt_eval_count': promptEvalCount, + 'prompt_eval_duration': promptEvalDuration, + 'eval_count': evalCount, + 'eval_duration': evalDuration, + }, + usage: _mapUsage(), + streaming: streaming, + ); + } + + AIChatMessageToolCall _mapToolCall(final o.ToolCall toolCall) { + return AIChatMessageToolCall( + id: const Uuid().v4(), + name: toolCall.function?.name ?? '', + argumentsRaw: json.encode(toolCall.function?.arguments ?? const {}), + arguments: toolCall.function?.arguments ?? const {}, + ); + } + + LanguageModelUsage _mapUsage() { + return LanguageModelUsage( + promptTokens: promptEvalCount, + responseTokens: evalCount, + totalTokens: (promptEvalCount != null || evalCount != null) + ? (promptEvalCount ?? 0) + (evalCount ?? 0) + : null, + ); + } + + FinishReason _mapFinishReason( + final o.DoneReason? 
reason, + ) => + switch (reason) { + o.DoneReason.stop => FinishReason.stop, + o.DoneReason.length => FinishReason.length, + o.DoneReason.load => FinishReason.unspecified, + null => FinishReason.unspecified, + }; +} diff --git a/packages/langchain_ollama/lib/src/chat_models/types.dart b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart similarity index 61% rename from packages/langchain_ollama/lib/src/chat_models/types.dart rename to packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart index 3f14d2a2..cf02b00c 100644 --- a/packages/langchain_ollama/lib/src/chat_models/types.dart +++ b/packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart @@ -1,14 +1,22 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; -import '../llms/types.dart'; +import '../../../langchain_ollama.dart'; +import '../../llms/types.dart'; /// {@template chat_ollama_options} /// Options to pass into ChatOllama. +/// +/// For a complete list of supported models and model variants, see the +/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} +@immutable class ChatOllamaOptions extends ChatModelOptions { /// {@macro chat_ollama_options} const ChatOllamaOptions({ - this.model = 'llama3', + super.model, this.format, this.keepAlive, this.numKeep, @@ -16,6 +24,7 @@ class ChatOllamaOptions extends ChatModelOptions { this.numPredict, this.topK, this.topP, + this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -40,12 +49,11 @@ class ChatOllamaOptions extends ChatModelOptions { this.useMmap, this.useMlock, this.numThread, + super.tools, + super.toolChoice, super.concurrencyLimit, }); - /// The model used to generate completions - final String? model; - /// The format to return a response in. Currently the only accepted value is /// json. /// @@ -84,12 +92,20 @@ class ChatOllamaOptions extends ChatModelOptions { /// (Default: 40) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more + /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; + /// Alternative to the [topP], and aims to ensure a balance of quality and + /// variety. [minP] represents the minimum probability for a token to be + /// considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability + /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. + /// (Default: 0.0) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -141,7 +157,7 @@ class ChatOllamaOptions extends ChatModelOptions { final double? mirostatEta; /// Penalize newlines in the output. - /// (Default: false) + /// (Default: true) final bool? penalizeNewline; /// Sequences where the API will stop generating further tokens. The returned @@ -172,7 +188,7 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? lowVram; /// Enable f16 key/value. - /// (Default: false) + /// (Default: true) final bool? f16KV; /// Enable logits all. 
@@ -197,16 +213,17 @@ class ChatOllamaOptions extends ChatModelOptions { /// the logical number of cores). final int? numThread; - /// Creates a copy of this [ChatOllamaOptions] object with the given fields - /// replaced with the new values. + @override ChatOllamaOptions copyWith({ final String? model, final OllamaResponseFormat? format, + final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, final int? topK, final double? topP, + final double? minP, final double? tfsZ, final double? typicalP, final int? repeatLastN, @@ -222,7 +239,6 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? numa, final int? numCtx, final int? numBatch, - final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -231,19 +247,21 @@ class ChatOllamaOptions extends ChatModelOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, - final bool? embeddingOnly, - final double? ropeFrequencyBase, - final double? ropeFrequencyScale, final int? numThread, + final List? tools, + final ChatToolChoice? toolChoice, + final int? concurrencyLimit, }) { return ChatOllamaOptions( model: model ?? this.model, format: format ?? this.format, + keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, + minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -268,6 +286,125 @@ class ChatOllamaOptions extends ChatModelOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + tools: tools ?? this.tools, + toolChoice: toolChoice ?? this.toolChoice, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + ChatOllamaOptions merge(covariant final ChatOllamaOptions? 
other) { + return copyWith( + model: other?.model, + format: other?.format, + keepAlive: other?.keepAlive, + numKeep: other?.numKeep, + seed: other?.seed, + numPredict: other?.numPredict, + topK: other?.topK, + topP: other?.topP, + minP: other?.minP, + tfsZ: other?.tfsZ, + typicalP: other?.typicalP, + repeatLastN: other?.repeatLastN, + temperature: other?.temperature, + repeatPenalty: other?.repeatPenalty, + presencePenalty: other?.presencePenalty, + frequencyPenalty: other?.frequencyPenalty, + mirostat: other?.mirostat, + mirostatTau: other?.mirostatTau, + mirostatEta: other?.mirostatEta, + penalizeNewline: other?.penalizeNewline, + stop: other?.stop, + numa: other?.numa, + numCtx: other?.numCtx, + numBatch: other?.numBatch, + numGpu: other?.numGpu, + mainGpu: other?.mainGpu, + lowVram: other?.lowVram, + f16KV: other?.f16KV, + logitsAll: other?.logitsAll, + vocabOnly: other?.vocabOnly, + useMmap: other?.useMmap, + useMlock: other?.useMlock, + numThread: other?.numThread, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final ChatOllamaOptions other) { + return model == other.model && + format == other.format && + keepAlive == other.keepAlive && + numKeep == other.numKeep && + seed == other.seed && + numPredict == other.numPredict && + topK == other.topK && + topP == other.topP && + minP == other.minP && + tfsZ == other.tfsZ && + typicalP == other.typicalP && + repeatLastN == other.repeatLastN && + temperature == other.temperature && + repeatPenalty == other.repeatPenalty && + presencePenalty == other.presencePenalty && + frequencyPenalty == other.frequencyPenalty && + mirostat == other.mirostat && + mirostatTau == other.mirostatTau && + mirostatEta == other.mirostatEta && + penalizeNewline == other.penalizeNewline && + const ListEquality().equals(stop, other.stop) && + numa == other.numa && + numCtx == other.numCtx && + numBatch == other.numBatch && + numGpu == other.numGpu && + mainGpu == other.mainGpu && + lowVram == other.lowVram && + f16KV == other.f16KV && + logitsAll == other.logitsAll && + vocabOnly == other.vocabOnly && + useMmap == other.useMmap && + useMlock == other.useMlock && + numThread == other.numThread && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + format.hashCode ^ + keepAlive.hashCode ^ + numKeep.hashCode ^ + seed.hashCode ^ + numPredict.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + minP.hashCode ^ + tfsZ.hashCode ^ + typicalP.hashCode ^ + repeatLastN.hashCode ^ + temperature.hashCode ^ + repeatPenalty.hashCode ^ + presencePenalty.hashCode ^ + frequencyPenalty.hashCode ^ + mirostat.hashCode ^ + mirostatTau.hashCode ^ + mirostatEta.hashCode ^ + penalizeNewline.hashCode ^ + const ListEquality().hash(stop) ^ + numa.hashCode ^ + numCtx.hashCode ^ + numBatch.hashCode ^ + numGpu.hashCode ^ + mainGpu.hashCode ^ + lowVram.hashCode ^ + f16KV.hashCode ^ + logitsAll.hashCode ^ + vocabOnly.hashCode ^ + useMmap.hashCode ^ + useMlock.hashCode ^ + numThread.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_ollama/lib/src/chat_models/mappers.dart b/packages/langchain_ollama/lib/src/chat_models/mappers.dart deleted file mode 100644 index 0553fb88..00000000 --- a/packages/langchain_ollama/lib/src/chat_models/mappers.dart +++ /dev/null @@ -1,142 +0,0 @@ -// ignore_for_file: public_member_api_docs -import 'package:collection/collection.dart'; -import 'package:langchain_core/chat_models.dart'; -import 
'package:langchain_core/language_models.dart'; -import 'package:ollama_dart/ollama_dart.dart'; - -extension OllamaChatMessagesMapper on List { - List toMessages() { - return map(_mapMessage).expand((final msg) => msg).toList(growable: false); - } - - List _mapMessage(final ChatMessage msg) { - return switch (msg) { - final SystemChatMessage msg => [ - Message( - role: MessageRole.system, - content: msg.content, - ), - ], - final HumanChatMessage msg => _mapHumanMessage(msg), - final AIChatMessage msg => [ - Message( - role: MessageRole.assistant, - content: msg.content, - ), - ], - ToolChatMessage() => - throw UnsupportedError('Ollama does not support tool calls'), - CustomChatMessage() => - throw UnsupportedError('Ollama does not support custom messages'), - }; - } - - List _mapHumanMessage(final HumanChatMessage message) { - return switch (message.content) { - final ChatMessageContentText c => [ - Message( - role: MessageRole.user, - content: c.text, - ), - ], - final ChatMessageContentImage c => [ - Message( - role: MessageRole.user, - content: c.data, - ), - ], - final ChatMessageContentMultiModal c => _mapContentMultiModal(c), - }; - } - - List _mapContentMultiModal( - final ChatMessageContentMultiModal content, - ) { - final parts = content.parts.groupListsBy((final p) => p.runtimeType); - - if ((parts[ChatMessageContentMultiModal]?.length ?? 0) > 0) { - throw UnsupportedError( - 'Cannot have multimodal content in multimodal content', - ); - } - - // If there's only one text part and the rest are images, then we combine them in one message - if ((parts[ChatMessageContentText]?.length ?? 0) == 1) { - return [ - Message( - role: MessageRole.user, - content: - (parts[ChatMessageContentText]!.first as ChatMessageContentText) - .text, - images: parts[ChatMessageContentImage] - ?.map((final p) => (p as ChatMessageContentImage).data) - .toList(growable: false), - ), - ]; - } - - // Otherwise, we return the parts as separate messages - return content.parts - .map( - (final p) => switch (p) { - final ChatMessageContentText c => Message( - role: MessageRole.user, - content: c.text, - ), - final ChatMessageContentImage c => Message( - role: MessageRole.user, - content: c.data, - ), - ChatMessageContentMultiModal() => throw UnsupportedError( - 'Cannot have multimodal content in multimodal content', - ), - }, - ) - .toList(growable: false); - } -} - -extension ChatResultMapper on GenerateChatCompletionResponse { - ChatResult toChatResult(final String id, {final bool streaming = false}) { - return ChatResult( - id: id, - output: AIChatMessage( - content: message?.content ?? '', - ), - finishReason: _mapFinishReason(doneReason), - metadata: { - 'model': model, - 'created_at': createdAt, - 'done': done, - 'total_duration': totalDuration, - 'load_duration': loadDuration, - 'prompt_eval_count': promptEvalCount, - 'prompt_eval_duration': promptEvalDuration, - 'eval_count': evalCount, - 'eval_duration': evalDuration, - }, - usage: _mapUsage(), - streaming: streaming, - ); - } - - LanguageModelUsage _mapUsage() { - return LanguageModelUsage( - promptTokens: promptEvalCount, - responseTokens: evalCount, - totalTokens: (promptEvalCount != null || evalCount != null) - ? (promptEvalCount ?? 0) + (evalCount ?? 0) - : null, - ); - } - - FinishReason _mapFinishReason( - final DoneReason? 
reason, - ) => - switch (reason) { - DoneReason.stop => FinishReason.stop, - DoneReason.length => FinishReason.length, - DoneReason.load => FinishReason.unspecified, - null => FinishReason.unspecified, - }; -} diff --git a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart index 66ac2edb..ffef2882 100644 --- a/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart +++ b/packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart @@ -13,7 +13,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// Example: /// ```dart -/// final embeddings = OllamaEmbeddings(model: 'llama3'); +/// final embeddings = OllamaEmbeddings(model: 'llama3.2'); /// final res = await embeddings.embedQuery('Hello world'); /// ``` /// @@ -23,7 +23,7 @@ import 'package:ollama_dart/ollama_dart.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3` +/// * e.g., for `Llama-7b`: `ollama pull llama3.2` /// /// ### Advance /// @@ -76,7 +76,7 @@ class OllamaEmbeddings implements Embeddings { /// - `client`: the HTTP client to use. You can set your own HTTP client if /// you need further customization (e.g. to use a Socks5 proxy). OllamaEmbeddings({ - this.model = 'llama3', + this.model = 'llama3.2', this.keepAlive, final String baseUrl = 'http://localhost:11434/api', final Map? headers, diff --git a/packages/langchain_ollama/lib/src/llms/ollama.dart b/packages/langchain_ollama/lib/src/llms/ollama.dart index 7eeb7e7c..9be8ed12 100644 --- a/packages/langchain_ollama/lib/src/llms/ollama.dart +++ b/packages/langchain_ollama/lib/src/llms/ollama.dart @@ -20,7 +20,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOption: const OllamaOptions( -/// model: 'llama3', +/// model: 'llama3.2', /// temperature: 1, /// ), /// ); @@ -49,7 +49,7 @@ import 'types.dart'; /// ```dart /// final llm = Ollama( /// defaultOptions: const OllamaOptions( -/// model: 'llama3', +/// model: 'llama3.2', /// temperature: 0, /// format: 'json', /// ), @@ -83,7 +83,7 @@ import 'types.dart'; /// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?'); /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ -/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3')) | outputParser, +/// 'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.2')) | outputParser, /// 'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); @@ -93,7 +93,7 @@ import 'types.dart'; /// /// 1. Download and install [Ollama](https://ollama.ai) /// 2. Fetch a model via `ollama pull ` -/// * e.g., for `Llama-7b`: `ollama pull llama3` +/// * e.g., for `Llama-7b`: `ollama pull llama3.2` /// /// ### Advance /// @@ -152,7 +152,7 @@ class Ollama extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OllamaOptions( - model: 'llama3', + model: defaultModel, ), this.encoding = 'cl100k_base', }) : _client = OllamaClient( @@ -177,6 +177,9 @@ class Ollama extends BaseLLM { @override String get modelType => 'ollama'; + /// The default model to use unless another is specified. + static const defaultModel = 'llama3.2'; + @override Future invoke( final PromptValue input, { @@ -210,14 +213,15 @@ class Ollama extends BaseLLM { final OllamaOptions? 
options, }) { return GenerateCompletionRequest( - model: options?.model ?? defaultOptions.model ?? throwNullModelError(), + model: options?.model ?? defaultOptions.model ?? defaultModel, prompt: prompt, - system: options?.system, - template: options?.template, - context: options?.context, - format: options?.format?.toResponseFormat(), - raw: options?.raw, - keepAlive: options?.keepAlive, + system: options?.system ?? defaultOptions.system, + suffix: options?.suffix ?? defaultOptions.suffix, + template: options?.template ?? defaultOptions.template, + context: options?.context ?? defaultOptions.context, + format: (options?.format ?? defaultOptions.format)?.toResponseFormat(), + raw: options?.raw ?? defaultOptions.raw, + keepAlive: options?.keepAlive ?? defaultOptions.keepAlive, stream: stream, options: RequestOptions( numKeep: options?.numKeep ?? defaultOptions.numKeep, @@ -225,6 +229,7 @@ class Ollama extends BaseLLM { numPredict: options?.numPredict ?? defaultOptions.numPredict, topK: options?.topK ?? defaultOptions.topK, topP: options?.topP ?? defaultOptions.topP, + minP: options?.minP ?? defaultOptions.minP, tfsZ: options?.tfsZ ?? defaultOptions.tfsZ, typicalP: options?.typicalP ?? defaultOptions.typicalP, repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN, @@ -276,7 +281,7 @@ class Ollama extends BaseLLM { return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_ollama/lib/src/llms/types.dart b/packages/langchain_ollama/lib/src/llms/types.dart index dcbe7669..a8807248 100644 --- a/packages/langchain_ollama/lib/src/llms/types.dart +++ b/packages/langchain_ollama/lib/src/llms/types.dart @@ -1,13 +1,20 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/llms.dart'; +import 'package:meta/meta.dart'; /// {@template ollama_options} /// Options to pass into the Ollama LLM. +/// +/// For a complete list of supported models and model variants, see the +/// [Ollama model library](https://ollama.ai/library). /// {@endtemplate} +@immutable class OllamaOptions extends LLMOptions { /// {@macro ollama_options} const OllamaOptions({ - this.model = 'llama3', + super.model, this.system, + this.suffix, this.template, this.context, this.format, @@ -18,6 +25,7 @@ class OllamaOptions extends LLMOptions { this.numPredict, this.topK, this.topP, + this.minP, this.tfsZ, this.typicalP, this.repeatLastN, @@ -45,12 +53,12 @@ class OllamaOptions extends LLMOptions { super.concurrencyLimit, }); - /// The model used to generate completions - final String? model; - /// The system prompt (Overrides what is defined in the Modelfile). final String? system; + /// The text that comes after the inserted text. + final String? suffix; + /// The full prompt or prompt template (overrides what is defined in the /// Modelfile). final String? template; @@ -106,12 +114,20 @@ class OllamaOptions extends LLMOptions { /// (Default: 40) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more + /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more /// diverse text, while a lower value (e.g., 0.5) will generate more focused /// and conservative text. /// (Default: 0.9) final double? topP; + /// Alternative to the [topP], and aims to ensure a balance of quality and + /// variety. 
[minP] represents the minimum probability for a token to be + /// considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability + /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out. + /// (Default: 0.0) + final double? minP; + /// Tail free sampling is used to reduce the impact of less probable tokens /// from the output. A higher value (e.g., 2.0) will reduce the impact more, /// while a value of 1.0 disables this setting. @@ -219,20 +235,22 @@ class OllamaOptions extends LLMOptions { /// the logical number of cores). final int? numThread; - /// Creates a copy of this [OllamaOptions] object with the given fields - /// replaced with the new values. + @override OllamaOptions copyWith({ final String? model, final String? system, + final String? suffix, final String? template, final List? context, final OllamaResponseFormat? format, final bool? raw, + final int? keepAlive, final int? numKeep, final int? seed, final int? numPredict, final int? topK, final double? topP, + final double? minP, final double? tfsZ, final double? typicalP, final int? repeatLastN, @@ -248,7 +266,6 @@ class OllamaOptions extends LLMOptions { final bool? numa, final int? numCtx, final int? numBatch, - final int? numGqa, final int? numGpu, final int? mainGpu, final bool? lowVram, @@ -257,23 +274,24 @@ class OllamaOptions extends LLMOptions { final bool? vocabOnly, final bool? useMmap, final bool? useMlock, - final bool? embeddingOnly, - final double? ropeFrequencyBase, - final double? ropeFrequencyScale, final int? numThread, + final int? concurrencyLimit, }) { return OllamaOptions( model: model ?? this.model, system: system ?? this.system, + suffix: suffix ?? this.suffix, template: template ?? this.template, context: context ?? this.context, format: format ?? this.format, raw: raw ?? this.raw, + keepAlive: keepAlive ?? this.keepAlive, numKeep: numKeep ?? this.numKeep, seed: seed ?? this.seed, numPredict: numPredict ?? this.numPredict, topK: topK ?? this.topK, topP: topP ?? this.topP, + minP: minP ?? this.minP, tfsZ: tfsZ ?? this.tfsZ, typicalP: typicalP ?? this.typicalP, repeatLastN: repeatLastN ?? this.repeatLastN, @@ -298,8 +316,142 @@ class OllamaOptions extends LLMOptions { useMmap: useMmap ?? this.useMmap, useMlock: useMlock ?? this.useMlock, numThread: numThread ?? this.numThread, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, ); } + + @override + OllamaOptions merge(covariant final OllamaOptions? 
other) { + return copyWith( + model: other?.model, + system: other?.system, + suffix: other?.suffix, + template: other?.template, + context: other?.context, + format: other?.format, + raw: other?.raw, + keepAlive: other?.keepAlive, + numKeep: other?.numKeep, + seed: other?.seed, + numPredict: other?.numPredict, + topK: other?.topK, + topP: other?.topP, + minP: other?.minP, + tfsZ: other?.tfsZ, + typicalP: other?.typicalP, + repeatLastN: other?.repeatLastN, + temperature: other?.temperature, + repeatPenalty: other?.repeatPenalty, + presencePenalty: other?.presencePenalty, + frequencyPenalty: other?.frequencyPenalty, + mirostat: other?.mirostat, + mirostatTau: other?.mirostatTau, + mirostatEta: other?.mirostatEta, + penalizeNewline: other?.penalizeNewline, + stop: other?.stop, + numa: other?.numa, + numCtx: other?.numCtx, + numBatch: other?.numBatch, + numGpu: other?.numGpu, + mainGpu: other?.mainGpu, + lowVram: other?.lowVram, + f16KV: other?.f16KV, + logitsAll: other?.logitsAll, + vocabOnly: other?.vocabOnly, + useMmap: other?.useMmap, + useMlock: other?.useMlock, + numThread: other?.numThread, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final OllamaOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + system == other.system && + suffix == other.suffix && + template == other.template && + const ListEquality().equals(context, other.context) && + format == other.format && + raw == other.raw && + keepAlive == other.keepAlive && + numKeep == other.numKeep && + seed == other.seed && + numPredict == other.numPredict && + topK == other.topK && + topP == other.topP && + minP == other.minP && + tfsZ == other.tfsZ && + typicalP == other.typicalP && + repeatLastN == other.repeatLastN && + temperature == other.temperature && + repeatPenalty == other.repeatPenalty && + presencePenalty == other.presencePenalty && + frequencyPenalty == other.frequencyPenalty && + mirostat == other.mirostat && + mirostatTau == other.mirostatTau && + mirostatEta == other.mirostatEta && + penalizeNewline == other.penalizeNewline && + const ListEquality().equals(stop, other.stop) && + numa == other.numa && + numCtx == other.numCtx && + numBatch == other.numBatch && + numGpu == other.numGpu && + mainGpu == other.mainGpu && + lowVram == other.lowVram && + f16KV == other.f16KV && + logitsAll == other.logitsAll && + vocabOnly == other.vocabOnly && + useMmap == other.useMmap && + useMlock == other.useMlock && + numThread == other.numThread && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + system.hashCode ^ + suffix.hashCode ^ + template.hashCode ^ + const ListEquality().hash(context) ^ + format.hashCode ^ + raw.hashCode ^ + keepAlive.hashCode ^ + numKeep.hashCode ^ + seed.hashCode ^ + numPredict.hashCode ^ + topK.hashCode ^ + topP.hashCode ^ + minP.hashCode ^ + tfsZ.hashCode ^ + typicalP.hashCode ^ + repeatLastN.hashCode ^ + temperature.hashCode ^ + repeatPenalty.hashCode ^ + presencePenalty.hashCode ^ + frequencyPenalty.hashCode ^ + mirostat.hashCode ^ + mirostatTau.hashCode ^ + mirostatEta.hashCode ^ + penalizeNewline.hashCode ^ + const ListEquality().hash(stop) ^ + numa.hashCode ^ + numCtx.hashCode ^ + numBatch.hashCode ^ + numGpu.hashCode ^ + mainGpu.hashCode ^ + lowVram.hashCode ^ + f16KV.hashCode ^ + logitsAll.hashCode ^ + vocabOnly.hashCode ^ + useMmap.hashCode ^ + useMlock.hashCode ^ + numThread.hashCode ^ + concurrencyLimit.hashCode; + } } 
/// The format to return a response in. diff --git a/packages/langchain_ollama/pubspec.yaml b/packages/langchain_ollama/pubspec.yaml index a8c94263..eb2c1fc8 100644 --- a/packages/langchain_ollama/pubspec.yaml +++ b/packages/langchain_ollama/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_ollama -description: LangChain.dart integration module for Ollama (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). -version: 0.2.1 +description: LangChain.dart integration module for Ollama (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). +version: 0.3.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,16 +14,16 @@ topics: - ollama environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' - http: ^1.1.0 - langchain_core: ^0.3.1 + collection: ^1.18.0 + http: ^1.2.2 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - ollama_dart: ^0.1.0 - uuid: ^4.3.3 + ollama_dart: ^0.2.2 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 + test: ^1.25.8 diff --git a/packages/langchain_ollama/pubspec_overrides.yaml b/packages/langchain_ollama/pubspec_overrides.yaml index 9090f50e..1cab36be 100644 --- a/packages/langchain_ollama/pubspec_overrides.yaml +++ b/packages/langchain_ollama/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: ollama_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,ollama_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart index 0fa46c03..9aac4640 100644 --- a/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart +++ b/packages/langchain_ollama/test/chat_models/chat_ollama_test.dart @@ -6,13 +6,14 @@ import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/output_parsers.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:langchain_ollama/langchain_ollama.dart'; import 'package:test/test.dart'; void main() { group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () { late ChatOllama chatModel; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.2'; const visionModel = 'llava:latest'; setUp(() async { @@ -107,7 +108,7 @@ void main() { ]), ); expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + res.output.content.replaceAll(RegExp(r'[\s\n-]'), ''), contains('123456789'), ); expect(res.finishReason, FinishReason.stop); @@ -209,10 +210,10 @@ void main() { test('Test Multi-turn conversations', () async { final prompt = PromptValue.chat([ - ChatMessage.humanText('List the numbers from 1 to 9 in order. '), + ChatMessage.humanText('List the numbers from 1 to 9 in order.'), ChatMessage.ai('123456789'), ChatMessage.humanText( - 'Remove the number "4" from the list', + 'Remove the number "4" from the list. 
Output only the remaining numbers in ascending order.', ), ]); final res = await chatModel.invoke( @@ -251,5 +252,160 @@ void main() { expect(res.output.content.toLowerCase(), contains('apple')); }); + + const tool1 = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + const tool2 = ToolSpec( + name: 'get_historic_weather', + description: 'Get the historic weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + final model = chatModel.bind( + const ChatOllamaOptions( + model: defaultModel, + tools: [tool1], + ), + ); + + final humanMessage = ChatMessage.humanText( + "What's the weather like in Boston and Madrid right now in celsius?", + ); + final res1 = await model.invoke(PromptValue.chat([humanMessage])); + + final aiMessage1 = res1.output; + expect(aiMessage1.toolCalls, hasLength(2)); + + final toolCall1 = aiMessage1.toolCalls.first; + expect(toolCall1.name, tool1.name); + expect(toolCall1.arguments.containsKey('location'), isTrue); + expect(toolCall1.arguments['location'], contains('Boston')); + expect(toolCall1.arguments['unit'], 'celsius'); + + final toolCall2 = aiMessage1.toolCalls.last; + expect(toolCall2.name, tool1.name); + expect(toolCall2.arguments.containsKey('location'), isTrue); + expect(toolCall2.arguments['location'], contains('Madrid')); + expect(toolCall2.arguments['unit'], 'celsius'); + + final functionResult1 = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage1 = ChatMessage.tool( + toolCallId: toolCall1.id, + content: json.encode(functionResult1), + ); + + final functionResult2 = { + 'temperature': '25', + 'unit': 'celsius', + 'description': 'Cloudy', + }; + final functionMessage2 = ChatMessage.tool( + toolCallId: toolCall2.id, + content: json.encode(functionResult2), + ); + + final res2 = await model.invoke( + PromptValue.chat([ + humanMessage, + aiMessage1, + functionMessage1, + functionMessage2, + ]), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + expect(aiMessage2.content, contains('25')); + }); + + test('Test multi tool call', () async { + final res = await chatModel.invoke( + PromptValue.string( + "What's the weather in Vellore, India and in Barcelona, Spain?", + ), + options: const ChatOllamaOptions( + model: defaultModel, + tools: [tool1, tool2], + ), + ); + expect(res.output.toolCalls, hasLength(2)); + final toolCall1 = res.output.toolCalls.first; + expect(toolCall1.name, 'get_current_weather'); + expect(toolCall1.argumentsRaw, isNotEmpty); + expect(toolCall1.arguments, isNotEmpty); + expect(toolCall1.arguments['location'], 'Vellore, India'); + expect(toolCall1.arguments['unit'], 'celsius'); + final toolCall2 = res.output.toolCalls.last; + expect(toolCall2.name, 'get_current_weather'); + expect(toolCall2.argumentsRaw, isNotEmpty); + 
expect(toolCall2.arguments, isNotEmpty); + expect(toolCall2.arguments['location'], 'Barcelona, Spain'); + expect(toolCall2.arguments['unit'], 'celsius'); + expect(res.finishReason, FinishReason.stop); + }); + + test('Test ChatToolChoice.none', () async { + final res = await chatModel.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: const ChatOllamaOptions( + model: defaultModel, + tools: [tool1], + toolChoice: ChatToolChoice.none, + ), + ); + expect(res.output.toolCalls, isEmpty); + expect(res.output.content, isNotEmpty); + }); + + test('Test ChatToolChoice.forced', () async { + final res = await chatModel.invoke( + PromptValue.string("What's the weather in Vellore, India?"), + options: ChatOllamaOptions( + model: defaultModel, + tools: const [tool1, tool2], + toolChoice: ChatToolChoice.forced(name: tool2.name), + ), + ); + expect(res.output.toolCalls, hasLength(1)); + final toolCall = res.output.toolCalls.first; + expect(toolCall.name, tool2.name); + }); }); } diff --git a/packages/langchain_ollama/test/embeddings/ollama_test.dart b/packages/langchain_ollama/test/embeddings/ollama_test.dart index 0f94ad0d..5363d47c 100644 --- a/packages/langchain_ollama/test/embeddings/ollama_test.dart +++ b/packages/langchain_ollama/test/embeddings/ollama_test.dart @@ -8,7 +8,7 @@ void main() { group('OllamaEmbeddings tests', skip: Platform.environment.containsKey('CI'), () { late OllamaEmbeddings embeddings; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.2'; setUp(() async { embeddings = OllamaEmbeddings( diff --git a/packages/langchain_ollama/test/llms/ollama_test.dart b/packages/langchain_ollama/test/llms/ollama_test.dart index e9a6ac55..7426b0c6 100644 --- a/packages/langchain_ollama/test/llms/ollama_test.dart +++ b/packages/langchain_ollama/test/llms/ollama_test.dart @@ -10,7 +10,7 @@ import 'package:test/test.dart'; void main() { group('Ollama tests', skip: Platform.environment.containsKey('CI'), () { late Ollama llm; - const defaultModel = 'llama3:latest'; + const defaultModel = 'llama3.2'; setUp(() async { llm = Ollama( diff --git a/packages/langchain_openai/CHANGELOG.md b/packages/langchain_openai/CHANGELOG.md index 2d1e113a..a7a12549 100644 --- a/packages/langchain_openai/CHANGELOG.md +++ b/packages/langchain_openai/CHANGELOG.md @@ -1,3 +1,45 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.7.2 + + - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) + - **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5)) + +## 0.7.1 + + - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f)) + - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). 
([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a)) + - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab)) + - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8)) + +## 0.7.0 + + - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84)) + - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7)) + - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43)) + - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560)) + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.6.3 + + - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649)) + +## 0.6.2 + + - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). 
([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5)) + - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace)) + +## 0.6.1+1 + + - Update a dependency to the latest release. + ## 0.6.1 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) @@ -73,7 +115,7 @@ ## 0.3.2 - - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.com/#/modules/model_io/models/chat_models/integrations/open_router)) + - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/open_router)) - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e)) - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f)) - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f)) @@ -253,7 +295,7 @@ - Initial public release. Check out the announcement post for all the details: -https://blog.langchaindart.com/introducing-langchain-dart-6b1d34fc41ef +https://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef ## 0.0.1-dev.7 diff --git a/packages/langchain_openai/README.md b/packages/langchain_openai/README.md index b7a080da..39073b3b 100644 --- a/packages/langchain_openai/README.md +++ b/packages/langchain_openai/README.md @@ -20,8 +20,6 @@ OpenAI module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart * `OpenAIQAWithStructureChain` a chain that answer questions in the specified structure. * `OpenAIQAWithSourcesChain`: a chain that answer questions providing sources. -- Agents: - * `OpenAIToolsAgent`: an agent driven by OpenAIs Tools powered API. - Tools: * `OpenAIDallETool`: a tool that uses DallE to generate images from text. 
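A hedged sketch of the migration these hunks roll out: `OpenAIToolsAgent` is deprecated in favour of the provider-agnostic `ToolsAgent`. The model, tool, and prompt mirror the updated doc-comment example further below; the import locations and API-key handling are assumptions, not part of the diff.

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
// Assumption: CalculatorTool lives in langchain_community in this version.
import 'package:langchain_community/langchain_community.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final llm = ChatOpenAI(
    apiKey: Platform.environment['OPENAI_API_KEY'],
    defaultOptions: const ChatOpenAIOptions(temperature: 0),
  );
  final tools = [CalculatorTool()];

  // Before: OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools)
  // After (or let `dart fix --apply` rename it via the fix_data added below):
  final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);
  final executor = AgentExecutor(agent: agent);

  final res = await executor.run('What is 40 raised to the 0.43 power?');
  print(res);

  llm.close();
}
```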
diff --git a/packages/langchain_openai/lib/fix_data/fix.yaml b/packages/langchain_openai/lib/fix_data/fix.yaml new file mode 100644 index 00000000..5db14fd0 --- /dev/null +++ b/packages/langchain_openai/lib/fix_data/fix.yaml @@ -0,0 +1,19 @@ +version: 1 + +transforms: + - title: "Migrate to 'ToolsAgent'" + date: 2024-08-21 + element: + uris: ['langchain_openai.dart', 'src/agents/tools.dart'] + class: 'OpenAIToolsAgent' + changes: + - kind: 'rename' + newName: 'ToolsAgent' + - title: "Migrate to 'ToolsAgentOutputParser'" + date: 2024-08-21 + element: + uris: ['langchain_openai.dart', 'src/agents/tools.dart'] + class: 'OpenAIToolsAgentOutputParser' + changes: + - kind: 'rename' + newName: 'ToolsAgentOutputParser' diff --git a/packages/langchain_openai/lib/langchain_openai.dart b/packages/langchain_openai/lib/langchain_openai.dart index d2730a6b..77e92aa5 100644 --- a/packages/langchain_openai/lib/langchain_openai.dart +++ b/packages/langchain_openai/lib/langchain_openai.dart @@ -1,4 +1,4 @@ -/// LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). +/// LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.). library; export 'package:openai_dart/openai_dart.dart' show OpenAIClientException; diff --git a/packages/langchain_openai/lib/src/agents/tools.dart b/packages/langchain_openai/lib/src/agents/tools.dart index 2867427d..a1a13583 100644 --- a/packages/langchain_openai/lib/src/agents/tools.dart +++ b/packages/langchain_openai/lib/src/agents/tools.dart @@ -1,3 +1,4 @@ +// ignore_for_file: deprecated_member_use_from_same_package import 'package:langchain_core/agents.dart'; import 'package:langchain_core/chains.dart'; import 'package:langchain_core/chat_models.dart'; @@ -17,6 +18,11 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( ); /// {@template openai_tools_agent} +/// > Note: This class is deprecated. Use `ToolsAgent` (from the `langchain` +/// > package instead). It works with the same API as this class, but can be +/// > used with any provider that supports tool calling. +/// > You can run `dart fix --apply` to automatically update your code. +/// /// An Agent driven by OpenAI's Tools powered API. /// /// Example: @@ -27,7 +33,7 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// temperature: 0, /// ); /// final tools = [CalculatorTool()]; -/// final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools); +/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools); /// final executor = AgentExecutor(agent: agent); /// final res = await executor.run('What is 40 raised to the 0.43 power? '); /// ``` @@ -69,8 +75,10 @@ const _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate( /// template if you only need to customize the system message or add some /// extra messages. /// {@endtemplate} +@Deprecated('Use ToolsAgent instead') class OpenAIToolsAgent extends BaseSingleActionAgent { /// {@macro openai_functions_agent} + @Deprecated('Use ToolsAgent instead') OpenAIToolsAgent({ required this.llmChain, required super.tools, @@ -118,6 +126,7 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { /// the first in the prompt. Default: "You are a helpful AI assistant". /// - [extraPromptMessages] prompt messages that will be placed between the /// system message and the input from the agent. 
+ @Deprecated('Use ToolsAgent.fromLLMAndTools() instead') factory OpenAIToolsAgent.fromLLMAndTools({ required final ChatOpenAI llm, required final List tools, @@ -241,14 +250,21 @@ class OpenAIToolsAgent extends BaseSingleActionAgent { } /// {@template openai_tools_agent_output_parser} +/// > Note: This class is deprecated. Use `ToolsAgentOutputParser` (from the +/// > `langchain` package instead). It is equivalent to this class, but +/// > prepared to work with the `ToolsAgent`. +/// > You can run `dart fix --apply` to automatically update your code. +/// /// Parser for [OpenAIToolsAgent]. /// /// It parses the output of the LLM and returns the corresponding /// [BaseAgentAction] to be executed. /// {@endtemplate} +@Deprecated('Use ToolsAgentOutputParser instead') class OpenAIToolsAgentOutputParser extends BaseOutputParser> { /// {@macro openai_tools_agent_output_parser} + @Deprecated('Use ToolsAgentOutputParser instead') const OpenAIToolsAgentOutputParser() : super(defaultOptions: const OutputParserOptions()); diff --git a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart index 207577a1..7c812836 100644 --- a/packages/langchain_openai/lib/src/chains/qa_with_sources.dart +++ b/packages/langchain_openai/lib/src/chains/qa_with_sources.dart @@ -12,7 +12,7 @@ import 'qa_with_structure.dart'; /// ```dart /// final llm = ChatOpenAI( /// apiKey: openaiApiKey, -/// model: 'gpt-3.5-turbo-0613', +/// model: 'gpt-4o-mini', /// temperature: 0, /// ); /// final qaChain = OpenAIQAWithSourcesChain(llm: llm); diff --git a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart index 053bf481..c8a670f5 100644 --- a/packages/langchain_openai/lib/src/chat_models/chat_openai.dart +++ b/packages/langchain_openai/lib/src/chat_models/chat_openai.dart @@ -25,8 +25,10 @@ import 'types.dart'; /// - [Completions API docs](https://platform.openai.com/docs/api-reference/chat) /// /// You can also use this wrapper to consume OpenAI-compatible APIs like -/// [Anyscale](https://www.anyscale.com), [Together AI](https://www.together.ai), -/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), etc. +/// [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), +/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), +/// [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), +/// [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. /// /// ### Call options /// @@ -74,7 +76,7 @@ import 'types.dart'; /// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?'); /// final chain = Runnable.fromMap({ /// 'q1': prompt1 | chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4')) | outputParser, -/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser, +/// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser, /// }); /// final res = await chain.invoke({'name': 'David'}); /// ``` @@ -172,7 +174,7 @@ class ChatOpenAI extends BaseChatModel { /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). /// - [ChatOpenAI.encoding] - /// - [OpenAI.defaultOptions] + /// - [ChatOpenAI.defaultOptions] /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. 
Defaults to OpenAI's API URL. You can @@ -192,12 +194,13 @@ class ChatOpenAI extends BaseChatModel { final Map? queryParams, final http.Client? client, super.defaultOptions = const ChatOpenAIOptions( - model: 'gpt-3.5-turbo', + model: defaultModel, ), this.encoding, }) : _client = OpenAIClient( apiKey: apiKey ?? '', organization: organization, + beta: null, baseUrl: baseUrl, headers: headers, queryParams: queryParams, @@ -236,15 +239,19 @@ class ChatOpenAI extends BaseChatModel { @override String get modelType => 'openai-chat'; + /// The default model to use unless another is specified. + static const defaultModel = 'gpt-4o-mini'; + @override Future invoke( final PromptValue input, { final ChatOpenAIOptions? options, }) async { final completion = await _client.createChatCompletion( - request: _createChatCompletionRequest( + request: createChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, ), ); return completion.toChatResult(completion.id ?? _uuid.v4()); @@ -257,9 +264,10 @@ class ChatOpenAI extends BaseChatModel { }) { return _client .createChatCompletionStream( - request: _createChatCompletionRequest( + request: createChatCompletionRequest( input.toChatMessages(), options: options, + defaultOptions: defaultOptions, stream: true, ), ) @@ -269,48 +277,6 @@ class ChatOpenAI extends BaseChatModel { ); } - /// Creates a [CreateChatCompletionRequest] from the given input. - CreateChatCompletionRequest _createChatCompletionRequest( - final List messages, { - final ChatOpenAIOptions? options, - final bool stream = false, - }) { - final messagesDtos = messages.toChatCompletionMessages(); - final toolsDtos = options?.tools?.toChatCompletionTool() ?? - defaultOptions.tools?.toChatCompletionTool(); - final toolChoice = options?.toolChoice?.toChatCompletionToolChoice() ?? - defaultOptions.toolChoice?.toChatCompletionToolChoice(); - final responseFormat = - options?.responseFormat ?? defaultOptions.responseFormat; - final responseFormatDto = responseFormat?.toChatCompletionResponseFormat(); - - return CreateChatCompletionRequest( - model: ChatCompletionModel.modelId( - options?.model ?? defaultOptions.model ?? throwNullModelError(), - ), - messages: messagesDtos, - tools: toolsDtos, - toolChoice: toolChoice, - frequencyPenalty: - options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, - logitBias: options?.logitBias ?? defaultOptions.logitBias, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, - n: options?.n ?? defaultOptions.n, - presencePenalty: - options?.presencePenalty ?? defaultOptions.presencePenalty, - responseFormat: responseFormatDto, - seed: options?.seed ?? defaultOptions.seed, - stop: (options?.stop ?? defaultOptions.stop) != null - ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) - : null, - temperature: options?.temperature ?? defaultOptions.temperature, - topP: options?.topP ?? defaultOptions.topP, - user: options?.user ?? defaultOptions.user, - streamOptions: - stream ? const ChatCompletionStreamOptions(includeUsage: true) : null, - ); - } - /// Tokenizes the given prompt using tiktoken with the encoding used by the /// [model]. If an encoding model is specified in [encoding] field, that /// encoding is used instead. @@ -329,8 +295,7 @@ class ChatOpenAI extends BaseChatModel { final PromptValue promptValue, { final ChatOpenAIOptions? options, }) async { - final model = - options?.model ?? defaultOptions.model ?? throwNullModelError(); + final model = options?.model ?? defaultOptions.model ?? 
defaultModel; final tiktoken = _getTiktoken(); final messages = promptValue.toChatMessages(); @@ -339,7 +304,6 @@ class ChatOpenAI extends BaseChatModel { final int tokensPerName; switch (model) { - case 'gpt-3.5-turbo-0613': case 'gpt-3.5-turbo-16k-0613': case 'gpt-4-0314': case 'gpt-4-32k-0314': @@ -353,8 +317,8 @@ class ChatOpenAI extends BaseChatModel { // If there's a name, the role is omitted tokensPerName = -1; default: - if (model.startsWith('gpt-3.5-turbo') || model.startsWith('gpt-4')) { - // Returning num tokens assuming gpt-3.5-turbo-0613 + if (model.startsWith('gpt-4o-mini') || model.startsWith('gpt-4')) { + // Returning num tokens assuming gpt-4 tokensPerMessage = 3; tokensPerName = 1; } else { @@ -399,7 +363,7 @@ class ChatOpenAI extends BaseChatModel { : getEncoding('cl100k_base'); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/chat_models/mappers.dart b/packages/langchain_openai/lib/src/chat_models/mappers.dart index 78054bed..ad8bec0b 100644 --- a/packages/langchain_openai/lib/src/chat_models/mappers.dart +++ b/packages/langchain_openai/lib/src/chat_models/mappers.dart @@ -6,8 +6,56 @@ import 'package:langchain_core/language_models.dart'; import 'package:langchain_core/tools.dart'; import 'package:openai_dart/openai_dart.dart'; +import 'chat_openai.dart'; import 'types.dart'; +/// Creates a [CreateChatCompletionRequest] from the given input. +CreateChatCompletionRequest createChatCompletionRequest( + final List messages, { + required final ChatOpenAIOptions? options, + required final ChatOpenAIOptions defaultOptions, + final bool stream = false, +}) { + final messagesDtos = messages.toChatCompletionMessages(); + final toolsDtos = + (options?.tools ?? defaultOptions.tools)?.toChatCompletionTool(); + final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice) + ?.toChatCompletionToolChoice(); + final responseFormatDto = + (options?.responseFormat ?? defaultOptions.responseFormat) + ?.toChatCompletionResponseFormat(); + final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier) + .toCreateChatCompletionRequestServiceTier(); + + return CreateChatCompletionRequest( + model: ChatCompletionModel.modelId( + options?.model ?? defaultOptions.model ?? ChatOpenAI.defaultModel, + ), + messages: messagesDtos, + tools: toolsDtos, + toolChoice: toolChoice, + frequencyPenalty: + options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, + logitBias: options?.logitBias ?? defaultOptions.logitBias, + maxCompletionTokens: options?.maxTokens ?? defaultOptions.maxTokens, + n: options?.n ?? defaultOptions.n, + presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, + responseFormat: responseFormatDto, + seed: options?.seed ?? defaultOptions.seed, + stop: (options?.stop ?? defaultOptions.stop) != null + ? ChatCompletionStop.listString(options?.stop ?? defaultOptions.stop!) + : null, + temperature: options?.temperature ?? defaultOptions.temperature, + topP: options?.topP ?? defaultOptions.topP, + parallelToolCalls: + options?.parallelToolCalls ?? defaultOptions.parallelToolCalls, + serviceTier: serviceTierDto, + user: options?.user ?? defaultOptions.user, + streamOptions: + stream ? 
const ChatCompletionStreamOptions(includeUsage: true) : null, + ); +} + extension ChatMessageListMapper on List { List toChatCompletionMessages() { return map(_mapMessage).toList(growable: false); @@ -15,36 +63,36 @@ extension ChatMessageListMapper on List { ChatCompletionMessage _mapMessage(final ChatMessage msg) { return switch (msg) { - final SystemChatMessage systemChatMessage => ChatCompletionMessage.system( - content: systemChatMessage.content, - ), - final HumanChatMessage humanChatMessage => ChatCompletionMessage.user( - content: switch (humanChatMessage.content) { - final ChatMessageContentText c => _mapMessageContentString(c), - final ChatMessageContentImage c => - ChatCompletionUserMessageContent.parts( - [_mapMessageContentPartImage(c)], - ), - final ChatMessageContentMultiModal c => _mapMessageContentPart(c), - }, - ), - final AIChatMessage aiChatMessage => ChatCompletionMessage.assistant( - content: aiChatMessage.content, - toolCalls: aiChatMessage.toolCalls.isNotEmpty - ? aiChatMessage.toolCalls - .map(_mapMessageToolCall) - .toList(growable: false) - : null, - ), - final ToolChatMessage toolChatMessage => ChatCompletionMessage.tool( - toolCallId: toolChatMessage.toolCallId, - content: toolChatMessage.content, - ), + final SystemChatMessage msg => _mapSystemMessage(msg), + final HumanChatMessage msg => _mapHumanMessage(msg), + final AIChatMessage msg => _mapAIMessage(msg), + final ToolChatMessage msg => _mapToolMessage(msg), CustomChatMessage() => throw UnsupportedError('OpenAI does not support custom messages'), }; } + ChatCompletionMessage _mapSystemMessage( + final SystemChatMessage systemChatMessage, + ) { + return ChatCompletionMessage.system(content: systemChatMessage.content); + } + + ChatCompletionMessage _mapHumanMessage( + final HumanChatMessage humanChatMessage, + ) { + return ChatCompletionMessage.user( + content: switch (humanChatMessage.content) { + final ChatMessageContentText c => _mapMessageContentString(c), + final ChatMessageContentImage c => + ChatCompletionUserMessageContent.parts( + [_mapMessageContentPartImage(c)], + ), + final ChatMessageContentMultiModal c => _mapMessageContentPart(c), + }, + ); + } + ChatCompletionUserMessageContentString _mapMessageContentString( final ChatMessageContentText c, ) { @@ -105,6 +153,17 @@ extension ChatMessageListMapper on List { return ChatCompletionMessageContentParts(partsList); } + ChatCompletionMessage _mapAIMessage(final AIChatMessage aiChatMessage) { + return ChatCompletionMessage.assistant( + content: aiChatMessage.content, + toolCalls: aiChatMessage.toolCalls.isNotEmpty + ? 
aiChatMessage.toolCalls + .map(_mapMessageToolCall) + .toList(growable: false) + : null, + ); + } + ChatCompletionMessageToolCall _mapMessageToolCall( final AIChatMessageToolCall toolCall, ) { @@ -117,12 +176,26 @@ extension ChatMessageListMapper on List { ), ); } + + ChatCompletionMessage _mapToolMessage( + final ToolChatMessage toolChatMessage, + ) { + return ChatCompletionMessage.tool( + toolCallId: toolChatMessage.toolCallId, + content: toolChatMessage.content, + ); + } } extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { ChatResult toChatResult(final String id) { final choice = choices.first; final msg = choice.message; + + if (msg.refusal != null && msg.refusal!.isNotEmpty) { + throw OpenAIRefusalException(msg.refusal!); + } + return ChatResult( id: id, output: AIChatMessage( @@ -136,6 +209,7 @@ extension CreateChatCompletionResponseMapper on CreateChatCompletionResponse { 'model': model, 'created': created, 'system_fingerprint': systemFingerprint, + 'logprobs': choice.logprobs?.toMap(), }, usage: _mapUsage(usage), ); @@ -193,6 +267,9 @@ extension ChatToolChoiceMapper on ChatToolChoice { ChatToolChoiceAuto _ => const ChatCompletionToolChoiceOption.mode( ChatCompletionToolChoiceMode.auto, ), + ChatToolChoiceRequired() => const ChatCompletionToolChoiceOption.mode( + ChatCompletionToolChoiceMode.required, + ), final ChatToolChoiceForced t => ChatCompletionToolChoiceOption.tool( ChatCompletionNamedToolChoice( type: ChatCompletionNamedToolChoiceType.function, @@ -208,6 +285,11 @@ extension CreateChatCompletionStreamResponseMapper ChatResult toChatResult(final String id) { final choice = choices.firstOrNull; final delta = choice?.delta; + + if (delta?.refusal != null && delta!.refusal!.isNotEmpty) { + throw OpenAIRefusalException(delta.refusal!); + } + return ChatResult( id: id, output: AIChatMessage( @@ -245,18 +327,33 @@ extension CreateChatCompletionStreamResponseMapper } extension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat { - ChatCompletionResponseFormat toChatCompletionResponseFormat() { - return ChatCompletionResponseFormat( - type: switch (type) { - ChatOpenAIResponseFormatType.text => - ChatCompletionResponseFormatType.text, - ChatOpenAIResponseFormatType.jsonObject => - ChatCompletionResponseFormatType.jsonObject, - }, - ); + ResponseFormat toChatCompletionResponseFormat() { + return switch (this) { + ChatOpenAIResponseFormatText() => const ResponseFormat.text(), + ChatOpenAIResponseFormatJsonObject() => const ResponseFormat.jsonObject(), + final ChatOpenAIResponseFormatJsonSchema res => ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: res.jsonSchema.name, + description: res.jsonSchema.description, + schema: res.jsonSchema.schema, + strict: res.jsonSchema.strict, + ), + ), + }; } } +extension ChatOpenAIServiceTierX on ChatOpenAIServiceTier? { + CreateChatCompletionRequestServiceTier? + toCreateChatCompletionRequestServiceTier() => switch (this) { + ChatOpenAIServiceTier.auto => + CreateChatCompletionRequestServiceTier.auto, + ChatOpenAIServiceTier.vDefault => + CreateChatCompletionRequestServiceTier.vDefault, + null => null, + }; +} + FinishReason _mapFinishReason( final ChatCompletionFinishReason? 
reason, ) => diff --git a/packages/langchain_openai/lib/src/chat_models/types.dart b/packages/langchain_openai/lib/src/chat_models/types.dart index a82ab9a1..3173e293 100644 --- a/packages/langchain_openai/lib/src/chat_models/types.dart +++ b/packages/langchain_openai/lib/src/chat_models/types.dart @@ -1,13 +1,51 @@ +import 'package:collection/collection.dart'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; /// {@template chat_openai_options} /// Options to pass into the OpenAI Chat Model. +/// +/// Available [ChatOpenAIOptions.model]s: +/// - `chatgpt-4o-latest` +/// - `gpt-4` +/// - `gpt-4-32k` +/// - `gpt-4-32k-0314` +/// - `gpt-4-32k-0613` +/// - `gpt-4-0125-preview` +/// - `gpt-4-0314` +/// - `gpt-4-0613` +/// - `gpt-4-1106-preview` +/// - `gpt-4-turbo` +/// - `gpt-4-turbo-2024-04-09` +/// - `gpt-4-turbo-preview` +/// - `gpt-4-vision-preview` +/// - `gpt-4o` +/// - `gpt-4o-2024-05-13` +/// - `gpt-4o-2024-08-06` +/// - `gpt-4o-2024-08-06` +/// - `gpt-4o-mini` +/// - `gpt-4o-mini-2024-07-18` +/// - `gpt-3.5-turbo` +/// - `gpt-3.5-turbo-16k` +/// - `gpt-3.5-turbo-16k-0613` +/// - `gpt-3.5-turbo-0125` +/// - `gpt-3.5-turbo-0301` +/// - `gpt-3.5-turbo-0613` +/// - `gpt-3.5-turbo-1106` +/// - `o1-mini` +/// - `o1-mini-2024-09-12` +/// - `o1-preview` +/// - `o1-preview-2024-09-12` +/// +/// Mind that the list may be outdated. +/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} +@immutable class ChatOpenAIOptions extends ChatModelOptions { /// {@macro chat_openai_options} const ChatOpenAIOptions({ - this.model = 'gpt-3.5-turbo', + super.model, this.frequencyPenalty, this.logitBias, this.maxTokens, @@ -18,38 +56,14 @@ class ChatOpenAIOptions extends ChatModelOptions { this.stop, this.temperature, this.topP, - this.user, super.tools, super.toolChoice, + this.parallelToolCalls, + this.serviceTier, + this.user, super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'gpt-3.5-turbo'). - /// - /// Available models: - /// - `gpt-4` - /// - `gpt-4-0314` - /// - `gpt-4-0613` - /// - `gpt-4-32k` - /// - `gpt-4-32k-0314` - /// - `gpt-4-32k-0613` - /// - `gpt-4-turbo-preview` - /// - `gpt-4-1106-preview` - /// - `gpt-4-0125-preview` - /// - `gpt-4-vision-preview` - /// - `gpt-4o` - /// - `gpt-4o-2024-05-13` - /// - `gpt-3.5-turbo` - /// - `gpt-3.5-turbo-16k` - /// - `gpt-3.5-turbo-0301` - /// - `gpt-3.5-turbo-0613` - /// - `gpt-3.5-turbo-1106` - /// - `gpt-3.5-turbo-16k-0613` - /// - /// Mind that the list may be outdated. - /// See https://platform.openai.com/docs/models for the latest list. - final String? model; - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on /// their existing frequency in the text so far, decreasing the model's /// likelihood to repeat the same line verbatim. @@ -123,14 +137,24 @@ class ChatOpenAIOptions extends ChatModelOptions { /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p final double? topP; + /// Whether to enable parallel tool calling during tool use. + /// By default, it is enabled. + /// + /// + /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling + final bool? parallelToolCalls; + + /// Specifies the latency tier to use for processing the request. + /// This is relevant for customers subscribed to the scale tier service. + final ChatOpenAIServiceTier? 
serviceTier; + /// A unique identifier representing your end-user, which can help OpenAI to /// monitor and detect abuse. /// /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - /// Creates a copy of this [ChatOpenAIOptions] object with the given fields - /// replaced with the new values. + @override ChatOpenAIOptions copyWith({ final String? model, final double? frequencyPenalty, @@ -143,9 +167,12 @@ class ChatOpenAIOptions extends ChatModelOptions { final List? stop, final double? temperature, final double? topP, - final String? user, final List? tools, final ChatToolChoice? toolChoice, + final bool? parallelToolCalls, + final ChatOpenAIServiceTier? serviceTier, + final String? user, + final int? concurrencyLimit, }) { return ChatOpenAIOptions( model: model ?? this.model, @@ -159,32 +186,223 @@ class ChatOpenAIOptions extends ChatModelOptions { stop: stop ?? this.stop, temperature: temperature ?? this.temperature, topP: topP ?? this.topP, - user: user ?? this.user, tools: tools ?? this.tools, toolChoice: toolChoice ?? this.toolChoice, + parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls, + serviceTier: serviceTier ?? this.serviceTier, + user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit, ); } + + @override + ChatOpenAIOptions merge(covariant final ChatOpenAIOptions? other) { + return copyWith( + model: other?.model, + frequencyPenalty: other?.frequencyPenalty, + logitBias: other?.logitBias, + maxTokens: other?.maxTokens, + n: other?.n, + presencePenalty: other?.presencePenalty, + responseFormat: other?.responseFormat, + seed: other?.seed, + stop: other?.stop, + temperature: other?.temperature, + topP: other?.topP, + tools: other?.tools, + toolChoice: other?.toolChoice, + parallelToolCalls: other?.parallelToolCalls, + serviceTier: other?.serviceTier, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final ChatOpenAIOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + frequencyPenalty == other.frequencyPenalty && + const MapEquality() + .equals(logitBias, other.logitBias) && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + responseFormat == other.responseFormat && + seed == other.seed && + const ListEquality().equals(stop, other.stop) && + temperature == other.temperature && + topP == other.topP && + const ListEquality().equals(tools, other.tools) && + toolChoice == other.toolChoice && + parallelToolCalls == other.parallelToolCalls && + serviceTier == other.serviceTier && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + responseFormat.hashCode ^ + seed.hashCode ^ + const ListEquality().hash(stop) ^ + temperature.hashCode ^ + topP.hashCode ^ + const ListEquality().hash(tools) ^ + toolChoice.hashCode ^ + parallelToolCalls.hashCode ^ + serviceTier.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } /// {@template chat_openai_response_format} /// An object specifying the format that the model must output. 
/// {@endtemplate} -class ChatOpenAIResponseFormat { - /// {@macro chat_openai_response_format} - const ChatOpenAIResponseFormat({ - required this.type, +sealed class ChatOpenAIResponseFormat { + const ChatOpenAIResponseFormat(); + + /// The model will respond with text. + static const text = ChatOpenAIResponseFormatText(); + + /// The model will respond with a valid JSON object. + static const jsonObject = ChatOpenAIResponseFormatJsonObject(); + + /// The model will respond with a valid JSON object that adheres to the + /// specified schema. + factory ChatOpenAIResponseFormat.jsonSchema( + final ChatOpenAIJsonSchema jsonSchema, + ) => + ChatOpenAIResponseFormatJsonSchema(jsonSchema: jsonSchema); +} + +/// {@template chat_openai_response_format_text} +/// The model will respond with text. +/// {@endtemplate} +class ChatOpenAIResponseFormatText extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_text} + const ChatOpenAIResponseFormatText(); +} + +/// {@template chat_openai_response_format_json_object} +/// The model will respond with a valid JSON object. +/// {@endtemplate} +class ChatOpenAIResponseFormatJsonObject extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_json_object} + const ChatOpenAIResponseFormatJsonObject(); +} + +/// {@template chat_openai_response_format_json_schema} +/// The model will respond with a valid JSON object that adheres to the +/// specified schema. +/// {@endtemplate} +@immutable +class ChatOpenAIResponseFormatJsonSchema extends ChatOpenAIResponseFormat { + /// {@macro chat_openai_response_format_json_schema} + const ChatOpenAIResponseFormatJsonSchema({ + required this.jsonSchema, + }); + + /// The JSON schema that the model must adhere to. + final ChatOpenAIJsonSchema jsonSchema; + + @override + bool operator ==(covariant ChatOpenAIResponseFormatJsonSchema other) { + return identical(this, other) || + runtimeType == other.runtimeType && jsonSchema == other.jsonSchema; + } + + @override + int get hashCode => jsonSchema.hashCode; +} + +/// {@template chat_openai_json_schema} +/// Specifies the schema for the response format. +/// {@endtemplate} +@immutable +class ChatOpenAIJsonSchema { + /// {@macro chat_openai_json_schema} + const ChatOpenAIJsonSchema({ + required this.name, + required this.schema, + this.description, + this.strict = false, }); - /// The format type. - final ChatOpenAIResponseFormatType type; + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain + /// underscores and dashes, with a maximum length of 64. + final String name; + + /// A description of what the response format is for, used by the model to + /// determine how to respond in the format. + final String? description; + + /// The schema for the response format, described as a JSON Schema object. + final Map schema; + + /// Whether to enable strict schema adherence when generating the output. + /// If set to true, the model will always follow the exact schema defined in + /// the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ final bool strict; + + @override + bool operator ==(covariant ChatOpenAIJsonSchema other) { + return identical(this, other) || + runtimeType == other.runtimeType && + name == other.name && + description == other.description && + const MapEquality().equals(schema, other.schema) && + strict == other.strict; + } + + @override + int get hashCode { + return name.hashCode ^ + description.hashCode ^ + const MapEquality().hash(schema) ^ + strict.hashCode; + } } -/// Types of response formats. -enum ChatOpenAIResponseFormatType { - /// Standard text mode. - text, +/// Specifies the latency tier to use for processing the request. +/// This is relevant for customers subscribed to the scale tier service. +enum ChatOpenAIServiceTier { + /// The system will utilize scale tier credits until they are exhausted. + auto, - /// [ChatOpenAIResponseFormatType.jsonObject] enables JSON mode, which - /// guarantees the message the model generates is valid JSON. - jsonObject, + /// The request will be processed using the default service tier with a lower + /// uptime SLA and no latency guarantee. + vDefault, +} + +/// {@template openai_refusal_exception} +/// Exception thrown when OpenAI Structured Outputs API returns a refusal. +/// +/// When using OpenAI's Structured Outputs API with user-generated input, the +/// model may occasionally refuse to fulfill the request for safety reasons. +/// +/// See here for more on refusals: +/// https://platform.openai.com/docs/guides/structured-outputs/refusals +/// {@endtemplate} +class OpenAIRefusalException implements Exception { + /// {@macro openai_refusal_exception} + const OpenAIRefusalException(this.message); + + /// The refusal message. + final String message; + + @override + String toString() { + return 'OpenAIRefusalException: $message'; + } } diff --git a/packages/langchain_openai/lib/src/llms/openai.dart b/packages/langchain_openai/lib/src/llms/openai.dart index 086b8b8a..aed0e9e9 100644 --- a/packages/langchain_openai/lib/src/llms/openai.dart +++ b/packages/langchain_openai/lib/src/llms/openai.dart @@ -1,3 +1,5 @@ +import 'dart:math'; + import 'package:http/http.dart' as http; import 'package:langchain_core/llms.dart'; import 'package:langchain_core/prompts.dart'; @@ -186,8 +188,9 @@ class OpenAI extends BaseLLM { final Map? queryParams, final http.Client? client, super.defaultOptions = const OpenAIOptions( - model: 'gpt-3.5-turbo-instruct', - maxTokens: 256, + model: defaultModel, + maxTokens: defaultMaxTokens, + concurrencyLimit: defaultConcurrencyLimit, ), this.encoding, }) : _client = OpenAIClient( @@ -228,6 +231,15 @@ class OpenAI extends BaseLLM { @override String get modelType => 'openai'; + /// The default model to use unless another is specified. + static const defaultModel = 'gpt-3.5-turbo-instruct'; + + /// The default max tokens to use unless another is specified. + static const defaultMaxTokens = 256; + + /// The default concurrency limit to use unless another is specified. + static const defaultConcurrencyLimit = 20; + @override Future invoke( final PromptValue input, { @@ -259,7 +271,8 @@ class OpenAI extends BaseLLM { // Otherwise, we can batch the calls to the API final finalOptions = options?.first ?? defaultOptions; - final concurrencyLimit = finalOptions.concurrencyLimit; + final concurrencyLimit = + min(finalOptions.concurrencyLimit, defaultConcurrencyLimit); var index = 0; final results = []; @@ -302,7 +315,7 @@ class OpenAI extends BaseLLM { }) { return CreateCompletionRequest( model: CompletionModel.modelId( - options?.model ?? 
defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ), prompt: CompletionPrompt.listString(prompts), bestOf: options?.bestOf ?? defaultOptions.bestOf, @@ -310,7 +323,8 @@ class OpenAI extends BaseLLM { options?.frequencyPenalty ?? defaultOptions.frequencyPenalty, logitBias: options?.logitBias ?? defaultOptions.logitBias, logprobs: options?.logprobs ?? defaultOptions.logprobs, - maxTokens: options?.maxTokens ?? defaultOptions.maxTokens, + maxTokens: + options?.maxTokens ?? defaultOptions.maxTokens ?? defaultMaxTokens, n: options?.n ?? defaultOptions.n, presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty, @@ -340,12 +354,12 @@ class OpenAI extends BaseLLM { final encoding = this.encoding != null ? getEncoding(this.encoding!) : encodingForModel( - options?.model ?? defaultOptions.model ?? throwNullModelError(), + options?.model ?? defaultOptions.model ?? defaultModel, ); return encoding.encode(promptValue.toString()); } - /// Closes the client and cleans up any resources associated with it. + @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/llms/types.dart b/packages/langchain_openai/lib/src/llms/types.dart index 6869a4c4..a6bc2ee2 100644 --- a/packages/langchain_openai/lib/src/llms/types.dart +++ b/packages/langchain_openai/lib/src/llms/types.dart @@ -4,17 +4,24 @@ import 'package:meta/meta.dart'; /// {@template openai_options} /// Options to pass into the OpenAI LLM. +/// +/// Available models: +/// - `gpt-3.5-turbo-instruct` +/// - `davinci-002` +/// - `babbage-002` +/// Mind that the list may be outdated. +/// See https://platform.openai.com/docs/models for the latest list. /// {@endtemplate} @immutable class OpenAIOptions extends LLMOptions { /// {@macro openai_options} const OpenAIOptions({ - this.model = 'gpt-3.5-turbo-instruct', + super.model, this.bestOf, this.frequencyPenalty, this.logitBias, this.logprobs, - this.maxTokens = 256, + this.maxTokens, this.n, this.presencePenalty, this.seed, @@ -23,20 +30,9 @@ class OpenAIOptions extends LLMOptions { this.temperature, this.topP, this.user, - super.concurrencyLimit = 20, + super.concurrencyLimit, }); - /// ID of the model to use (e.g. 'gpt-3.5-turbo-instruct'). - /// - /// Available models: - /// - `gpt-3.5-turbo-instruct` - /// - `davinci-002` - /// - `babbage-002` - /// - /// Mind that the list may be outdated. - /// See https://platform.openai.com/docs/models for the latest list. - final String? model; - /// Generates best_of completions server-side and returns the "best" /// (the one with the highest log probability per token). /// @@ -125,8 +121,7 @@ class OpenAIOptions extends LLMOptions { /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; - /// Creates a copy of this [OpenAIOptions] object with the given fields - /// replaced with the new values. + @override OpenAIOptions copyWith({ final String? model, final int? bestOf, @@ -142,6 +137,7 @@ class OpenAIOptions extends LLMOptions { final double? temperature, final double? topP, final String? user, + final int? concurrencyLimit, }) { return OpenAIOptions( model: model ?? this.model, @@ -158,42 +154,69 @@ class OpenAIOptions extends LLMOptions { temperature: temperature ?? this.temperature, topP: topP ?? this.topP, user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + OpenAIOptions merge(covariant final OpenAIOptions? 
other) { + return copyWith( + model: other?.model, + bestOf: other?.bestOf, + frequencyPenalty: other?.frequencyPenalty, + logitBias: other?.logitBias, + logprobs: other?.logprobs, + maxTokens: other?.maxTokens, + n: other?.n, + presencePenalty: other?.presencePenalty, + seed: other?.seed, + stop: other?.stop, + suffix: other?.suffix, + temperature: other?.temperature, + topP: other?.topP, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, ); } @override - bool operator ==(covariant final OpenAIOptions other) => - identical(this, other) || - runtimeType == other.runtimeType && - model == other.model && - bestOf == other.bestOf && - frequencyPenalty == other.frequencyPenalty && - const MapEquality().equals(logitBias, other.logitBias) && - logprobs == other.logprobs && - maxTokens == other.maxTokens && - n == other.n && - presencePenalty == other.presencePenalty && - seed == other.seed && - stop == other.stop && - suffix == other.suffix && - temperature == other.temperature && - topP == other.topP && - user == other.user; + bool operator ==(covariant final OpenAIOptions other) { + return identical(this, other) || + runtimeType == other.runtimeType && + model == other.model && + bestOf == other.bestOf && + frequencyPenalty == other.frequencyPenalty && + const MapEquality() + .equals(logitBias, other.logitBias) && + logprobs == other.logprobs && + maxTokens == other.maxTokens && + n == other.n && + presencePenalty == other.presencePenalty && + seed == other.seed && + const ListEquality().equals(stop, other.stop) && + suffix == other.suffix && + temperature == other.temperature && + topP == other.topP && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } @override - int get hashCode => - model.hashCode ^ - bestOf.hashCode ^ - frequencyPenalty.hashCode ^ - const MapEquality().hash(logitBias) ^ - logprobs.hashCode ^ - maxTokens.hashCode ^ - n.hashCode ^ - presencePenalty.hashCode ^ - seed.hashCode ^ - stop.hashCode ^ - suffix.hashCode ^ - temperature.hashCode ^ - topP.hashCode ^ - user.hashCode; + int get hashCode { + return model.hashCode ^ + bestOf.hashCode ^ + frequencyPenalty.hashCode ^ + const MapEquality().hash(logitBias) ^ + logprobs.hashCode ^ + maxTokens.hashCode ^ + n.hashCode ^ + presencePenalty.hashCode ^ + seed.hashCode ^ + const ListEquality().hash(stop) ^ + suffix.hashCode ^ + temperature.hashCode ^ + topP.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_openai/lib/src/tools/dall_e.dart b/packages/langchain_openai/lib/src/tools/dall_e.dart index 3137dcfa..aefba7b9 100644 --- a/packages/langchain_openai/lib/src/tools/dall_e.dart +++ b/packages/langchain_openai/lib/src/tools/dall_e.dart @@ -34,7 +34,7 @@ export 'package:openai_dart/openai_dart.dart' /// ), /// ), /// ]; -/// final agent = OpenAIToolsAgent.fromLLMAndTools( +/// final agent = ToolsAgent.fromLLMAndTools( /// llm: llm, /// tools: tools, /// ); @@ -111,7 +111,7 @@ final class OpenAIDallETool extends StringTool { } } - /// Closes the client and cleans up any resources associated with it. 
+ @override void close() { _client.endSession(); } diff --git a/packages/langchain_openai/lib/src/tools/types.dart b/packages/langchain_openai/lib/src/tools/types.dart index 3b049dc6..086ba0f5 100644 --- a/packages/langchain_openai/lib/src/tools/types.dart +++ b/packages/langchain_openai/lib/src/tools/types.dart @@ -1,10 +1,12 @@ import 'package:langchain_core/tools.dart'; +import 'package:meta/meta.dart'; import 'dall_e.dart'; /// {@template open_ai_dall_e_tool_options} /// Generation options to pass into the [OpenAIDallETool]. /// {@endtemplate} +@immutable class OpenAIDallEToolOptions extends ToolOptions { /// {@macro open_ai_dall_e_tool_options} const OpenAIDallEToolOptions({ @@ -14,6 +16,7 @@ class OpenAIDallEToolOptions extends ToolOptions { this.size = ImageSize.v1024x1024, this.style = ImageStyle.vivid, this.user, + super.concurrencyLimit, }); /// ID of the model to use (e.g. `dall-e-2` or 'dall-e-3'). @@ -63,4 +66,60 @@ class OpenAIDallEToolOptions extends ToolOptions { /// /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids final String? user; + + @override + OpenAIDallEToolOptions copyWith({ + final String? model, + final ImageQuality? quality, + final ImageResponseFormat? responseFormat, + final ImageSize? size, + final ImageStyle? style, + final String? user, + final int? concurrencyLimit, + }) { + return OpenAIDallEToolOptions( + model: model ?? this.model, + quality: quality ?? this.quality, + responseFormat: responseFormat ?? this.responseFormat, + size: size ?? this.size, + style: style ?? this.style, + user: user ?? this.user, + concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit, + ); + } + + @override + OpenAIDallEToolOptions merge(covariant final OpenAIDallEToolOptions? other) { + return copyWith( + model: other?.model, + quality: other?.quality, + responseFormat: other?.responseFormat, + size: other?.size, + style: other?.style, + user: other?.user, + concurrencyLimit: other?.concurrencyLimit, + ); + } + + @override + bool operator ==(covariant final OpenAIDallEToolOptions other) { + return model == other.model && + quality == other.quality && + responseFormat == other.responseFormat && + size == other.size && + style == other.style && + user == other.user && + concurrencyLimit == other.concurrencyLimit; + } + + @override + int get hashCode { + return model.hashCode ^ + quality.hashCode ^ + responseFormat.hashCode ^ + size.hashCode ^ + style.hashCode ^ + user.hashCode ^ + concurrencyLimit.hashCode; + } } diff --git a/packages/langchain_openai/pubspec.yaml b/packages/langchain_openai/pubspec.yaml index 9b7be49d..1161ee71 100644 --- a/packages/langchain_openai/pubspec.yaml +++ b/packages/langchain_openai/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_openai -description: LangChain.dart integration module for OpenAI (GPT-3, GPT-4, Functions, etc.). -version: 0.6.1 +description: LangChain.dart integration module for OpenAI (GPT-4o, o1, Embeddings, DALL·E, etc.). 
+version: 0.7.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,18 +14,18 @@ topics: - gpt environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - collection: '>=1.17.0 <1.19.0' - http: ^1.1.0 - langchain_core: ^0.3.1 + collection: ^1.18.0 + http: ^1.2.2 + langchain_core: 0.3.6 langchain_tiktoken: ^1.0.1 meta: ^1.11.0 - openai_dart: ^0.3.2 - uuid: ^4.3.3 + openai_dart: ^0.4.2 + uuid: ^4.4.2 dev_dependencies: - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - test: ^1.25.2 + langchain: ^0.7.6 + langchain_community: 0.3.2 + test: ^1.25.8 diff --git a/packages/langchain_openai/pubspec_overrides.yaml b/packages/langchain_openai/pubspec_overrides.yaml index 18a3bcaa..92ad1620 100644 --- a/packages/langchain_openai/pubspec_overrides.yaml +++ b/packages/langchain_openai/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: openai_dart,langchain_core,langchain_community,langchain +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,openai_dart,tavily_dart dependency_overrides: langchain: path: ../langchain @@ -8,3 +8,5 @@ dependency_overrides: path: ../langchain_core openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_openai/test/agents/tools_test.dart b/packages/langchain_openai/test/agents/tools_test.dart index 57342631..03b52dea 100644 --- a/packages/langchain_openai/test/agents/tools_test.dart +++ b/packages/langchain_openai/test/agents/tools_test.dart @@ -1,3 +1,4 @@ +// ignore_for_file: deprecated_member_use_from_same_package @TestOn('vm') library; // Uses dart:io @@ -23,7 +24,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ); @@ -45,7 +45,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ); @@ -134,7 +133,6 @@ void main() { final model = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', temperature: 0, ), ).bind(ChatOpenAIOptions(tools: [tool])); diff --git a/packages/langchain_openai/test/chains/qa_with_sources_test.dart b/packages/langchain_openai/test/chains/qa_with_sources_test.dart index b1080986..c655af98 100644 --- a/packages/langchain_openai/test/chains/qa_with_sources_test.dart +++ b/packages/langchain_openai/test/chains/qa_with_sources_test.dart @@ -53,7 +53,6 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-3.5-turbo-0613', temperature: 0, ), ); @@ -126,7 +125,6 @@ Question: {question} final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-3.5-turbo', temperature: 0, ), ); diff --git a/packages/langchain_openai/test/chat_models/anyscale_test.dart b/packages/langchain_openai/test/chat_models/anyscale_test.dart deleted file mode 100644 index 1a2fdef1..00000000 --- a/packages/langchain_openai/test/chat_models/anyscale_test.dart +++ /dev/null @@ -1,115 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain_core/chat_models.dart'; -import 
'package:langchain_core/language_models.dart'; -import 'package:langchain_core/prompts.dart'; -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anyscale tests', () { - late ChatOpenAI chatModel; - - setUp(() async { - chatModel = ChatOpenAI( - apiKey: Platform.environment['ANYSCALE_API_KEY'], - baseUrl: 'https://api.endpoints.anyscale.com/v1', - ); - }); - - tearDown(() { - chatModel.close(); - }); - - test('Test invoke Anyscale API with different models', () async { - final models = [ - 'meta-llama/Llama-2-70b-chat-hf', - 'codellama/CodeLlama-34b-Instruct-hf', - 'mistralai/Mistral-7B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'HuggingFaceH4/zephyr-7b-beta', - 'Open-Orca/Mistral-7B-OpenOrca', - ]; - for (final model in models) { - final res = await chatModel.invoke( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS:', - ), - options: ChatOpenAIOptions(model: model), - ); - - expect(res.id, isNotEmpty); - expect( - res.finishReason, - isNot(FinishReason.unspecified), - reason: model, - ); - expect( - res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), - contains('123456789'), - reason: model, - ); - expect(res.metadata, isNotNull, reason: model); - expect(res.metadata['created'], greaterThan(0), reason: model); - expect(res.metadata['model'], isNotEmpty, reason: model); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - - test('Test stream Anyscale API with different models', () async { - final models = [ - 'meta-llama/Llama-2-70b-chat-hf', - 'codellama/CodeLlama-34b-Instruct-hf', - 'mistralai/Mistral-7B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'HuggingFaceH4/zephyr-7b-beta', - 'Open-Orca/Mistral-7B-OpenOrca', - ]; - for (final model in models) { - final stream = chatModel.stream( - PromptValue.string( - 'List the numbers from 1 to 9 in order. ' - 'Output ONLY the numbers in one line without any spaces or commas. 
' - 'NUMBERS:', - ), - options: ChatOpenAIOptions(model: model), - ); - - String content = ''; - int count = 0; - await for (final res in stream) { - content += res.output.content.replaceAll(RegExp(r'[\s\n,]'), ''); - count++; - } - expect(count, greaterThan(1), reason: model); - expect(content, contains('123456789'), reason: model); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - - test('Test countTokens', () async { - final models = [ - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'mistralai/Mistral-7B-Instruct-v0.2', - 'NousResearch/Nous-Hermes-2-Yi-34B', - 'openchat/openchat-3.5-1210', - 'togethercomputer/llama-2-70b-chat', - 'togethercomputer/falcon-40b-instruct', - ]; - for (final model in models) { - const text = 'Hello, how are you?'; - - final numTokens = await chatModel.countTokens( - PromptValue.chat([ChatMessage.humanText(text)]), - options: ChatOpenAIOptions(model: model), - ); - expect(numTokens, 13, reason: model); - } - }); - }); -} diff --git a/packages/langchain_openai/test/chat_models/chat_openai_test.dart b/packages/langchain_openai/test/chat_models/chat_openai_test.dart index 6268a77b..a0ea44fb 100644 --- a/packages/langchain_openai/test/chat_models/chat_openai_test.dart +++ b/packages/langchain_openai/test/chat_models/chat_openai_test.dart @@ -14,7 +14,7 @@ import 'package:test/test.dart'; void main() { group('ChatOpenAI tests', () { final openaiApiKey = Platform.environment['OPENAI_API_KEY']; - const defaultModel = 'gpt-3.5-turbo'; + const defaultModel = 'gpt-4o-mini'; test('Test ChatOpenAI parameters', () async { final chat = ChatOpenAI( @@ -118,36 +118,36 @@ void main() { expect(res.content, isNotEmpty); }); + const getCurrentWeatherTool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + test('Test ChatOpenAI tool calling', timeout: const Timeout(Duration(minutes: 1)), () async { final chat = ChatOpenAI(apiKey: openaiApiKey); - const tool = ToolSpec( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. 
San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - final humanMessage = ChatMessage.humanText( 'What’s the weather like in Boston right now?', ); final res1 = await chat.invoke( PromptValue.chat([humanMessage]), - options: const ChatOpenAIOptions(tools: [tool]), + options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), ); final aiMessage1 = res1.output; @@ -156,7 +156,7 @@ void main() { expect(aiMessage1.toolCalls, isNotEmpty); final toolCall = aiMessage1.toolCalls.first; - expect(toolCall.name, tool.name); + expect(toolCall.name, getCurrentWeatherTool.name); expect(toolCall.arguments.containsKey('location'), isTrue); expect(toolCall.arguments['location'], contains('Boston')); @@ -172,7 +172,7 @@ void main() { final res2 = await chat.invoke( PromptValue.chat([humanMessage, aiMessage1, functionMessage]), - options: const ChatOpenAIOptions(tools: [tool]), + options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]), ); final aiMessage2 = res2.output; @@ -208,9 +208,6 @@ void main() { test('Test countTokens messages', () async { final models = [ - 'gpt-3.5-turbo-0301', - 'gpt-3.5-turbo-0613', - 'gpt-3.5-turbo-16k-0613', 'gpt-4-0314', 'gpt-4-0613', ]; @@ -270,26 +267,26 @@ void main() { expect(result.usage.totalTokens, greaterThan(0)); }); - test('Test ChatOpenAI streaming with functions', () async { - const tool = ToolSpec( - name: 'joke', - description: 'A joke', - inputJsonSchema: { - 'type': 'object', - 'properties': { - 'setup': { - 'type': 'string', - 'description': 'The setup for the joke', - }, - 'punchline': { - 'type': 'string', - 'description': 'The punchline to the joke', - }, + const jokeTool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', }, - 'required': ['location', 'punchline'], }, - ); + 'required': ['location', 'punchline'], + }, + ); + test('Test ChatOpenAI streaming with functions', () async { final promptTemplate = ChatPromptTemplate.fromTemplate( 'tell me a long joke about {foo}', ); @@ -301,7 +298,7 @@ void main() { ), ).bind( ChatOpenAIOptions( - tools: const [tool], + tools: const [jokeTool], toolChoice: ChatToolChoice.forced(name: 'joke'), ), ); @@ -330,7 +327,7 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, temperature: 0, seed: 12345, ), @@ -359,11 +356,68 @@ void main() { final llm = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-1106-preview', + model: defaultModel, + temperature: 0, + seed: 9999, + responseFormat: ChatOpenAIResponseFormat.jsonObject, + ), + ); + + final res = await llm.invoke(prompt); + final outputMsg = res.output; + final outputJson = json.decode(outputMsg.content) as Map; + expect(outputJson['companies'], isNotNull); + final companies = outputJson['companies'] as List; + expect(companies, hasLength(2)); + final firstCompany = companies.first as Map; + expect(firstCompany['name'], 'Google'); + expect(firstCompany['origin'], 'USA'); + final secondCompany = companies.last as Map; + expect(secondCompany['name'], 'Deepmind'); + expect(secondCompany['origin'], 'UK'); + }); + + test('Test Structured Output', () async { + 
final prompt = PromptValue.chat([ + ChatMessage.system( + 'Extract the data of any companies mentioned in the ' + 'following statement. Return a JSON list.', + ), + ChatMessage.humanText( + 'Google was founded in the USA, while Deepmind was founded in the UK', + ), + ]); + final llm = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: ChatOpenAIOptions( + model: defaultModel, temperature: 0, seed: 9999, - responseFormat: ChatOpenAIResponseFormat( - type: ChatOpenAIResponseFormatType.jsonObject, + responseFormat: ChatOpenAIResponseFormat.jsonSchema( + const ChatOpenAIJsonSchema( + name: 'Companies', + description: 'A list of companies', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'companies': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'origin': {'type': 'string'}, + }, + 'additionalProperties': false, + 'required': ['name', 'origin'], + }, + }, + }, + 'additionalProperties': false, + 'required': ['companies'], + }, + ), ), ), ); @@ -400,7 +454,7 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, ), ); @@ -427,12 +481,70 @@ void main() { final chatModel = ChatOpenAI( apiKey: openaiApiKey, defaultOptions: const ChatOpenAIOptions( - model: 'gpt-4-turbo', + model: defaultModel, ), ); final res = await chatModel.invoke(prompt); expect(res.output.content.toLowerCase(), contains('apple')); }); + + test('Test additive bind calls', () async { + final chatModel = ChatOpenAI( + apiKey: openaiApiKey, + defaultOptions: const ChatOpenAIOptions( + model: defaultModel, + temperature: 0, + ), + ); + + final chatModelWithTools = chatModel.bind( + const ChatOpenAIOptions( + tools: [getCurrentWeatherTool, jokeTool], + ), + ); + + final res1 = await chatModelWithTools.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res1.output.toolCalls.map((tc) => tc.name).toSet(), + {getCurrentWeatherTool.name, jokeTool.name}, + ); + + final chatModelForceWeatherTool = chatModelWithTools.bind( + ChatOpenAIOptions( + toolChoice: ChatToolChoice.forced(name: getCurrentWeatherTool.name), + ), + ); + + final res2 = await chatModelForceWeatherTool.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res2.output.toolCalls.map((tc) => tc.name).toSet(), + {getCurrentWeatherTool.name}, + ); + + final chatModelForceJokeTool = chatModelWithTools.bind( + ChatOpenAIOptions( + toolChoice: ChatToolChoice.forced(name: jokeTool.name), + ), + ); + + final res3 = await chatModelForceJokeTool.invoke( + PromptValue.string( + 'Tell me the weather in Barcelona, Spain and a joke about bears', + ), + ); + expect( + res3.output.toolCalls.map((tc) => tc.name).toSet(), + {jokeTool.name}, + ); + }); }); } diff --git a/packages/langchain_openai/test/chat_models/github_models_test.dart b/packages/langchain_openai/test/chat_models/github_models_test.dart new file mode 100644 index 00000000..7eac34dd --- /dev/null +++ b/packages/langchain_openai/test/chat_models/github_models_test.dart @@ -0,0 +1,181 @@ +// ignore_for_file: avoid_print +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:langchain_core/chat_models.dart'; +import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; +import 'package:langchain_openai/langchain_openai.dart'; +import 
'package:test/test.dart'; + +void main() { + group('GitHub Models tests', () { + late ChatOpenAI chatModel; + + setUp(() async { + chatModel = ChatOpenAI( + apiKey: Platform.environment['GITHUB_TOKEN'], + baseUrl: 'https://models.inference.ai.azure.com', + ); + }); + + tearDown(() { + chatModel.close(); + }); + + test('Test invoke GitHub Models API with different models', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Mistral-large', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + final res = await chatModel.invoke( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. ' + 'NUMBERS:', + ), + options: ChatOpenAIOptions( + model: model, + temperature: 0, + ), + ); + + expect(res.id, isNotEmpty); + expect( + res.output.content.replaceAll(RegExp(r'[\s\n]'), ''), + contains('123456789'), + ); + expect(res.metadata, isNotEmpty, reason: model); + expect(res.metadata['created'], greaterThan(0), reason: model); + expect(res.metadata['model'], isNotEmpty, reason: model); + } + }); + + test('Test stream GitHub Models API with different models', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + final stream = chatModel.stream( + PromptValue.string( + 'List the numbers from 1 to 9 in order. ' + 'Output ONLY the numbers in one line without any spaces or commas. ' + 'NUMBERS:', + ), + options: ChatOpenAIOptions( + model: model, + temperature: 0, + ), + ); + + String content = ''; + int count = 0; + await for (final res in stream) { + content += res.output.content.replaceAll(RegExp(r'[\s\n]'), ''); + count++; + } + expect(count, greaterThan(1), reason: model); + expect(content, contains('123456789'), reason: model); + } + }); + + test('Test countTokens', () async { + final models = [ + 'gpt-4o', + 'AI21-Jamba-Instruct', + 'meta-llama-3.1-405b-instruct', + 'Mistral-large', + 'Phi-3.5-mini-instruct', + ]; + for (final model in models) { + print('Testing model: $model'); + const text = 'Hello, how are you?'; + + final numTokens = await chatModel.countTokens( + PromptValue.chat([ChatMessage.humanText(text)]), + options: ChatOpenAIOptions(model: model), + ); + expect(numTokens, 13, reason: model); + } + }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + }); + }); +} diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index 396f8ac4..d7c8fc9c 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ -1,10 +1,12 @@ @TestOn('vm') library; // Uses dart:io +import 'dart:convert'; import 'dart:io'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; import 'package:test/test.dart'; @@ -25,7 +27,7 @@ void main() { test('Test invoke OpenRouter API with different models', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', @@ -55,7 +57,7 @@ void main() { test('Test stream OpenRouter API with different models', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', // 'google/gemini-pro', // Not supported 'anthropic/claude-2', @@ -86,7 +88,7 @@ void main() { test('Test countTokens', () async { final models = [ - 'gpt-3.5-turbo', + 'gpt-4o-mini', 'gpt-4', 'google/gemini-pro', 'anthropic/claude-2', @@ -104,5 +106,72 @@ void main() { expect(numTokens, 13, reason: model); } }); + + test('Test tool calling', timeout: const Timeout(Duration(minutes: 1)), + () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. 
San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + expect(aiMessage2.content, contains('22')); + }); }); } diff --git a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart b/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart deleted file mode 100644 index 988c7e4c..00000000 --- a/packages/langchain_openai/test/embeddings/anyscale_embeddings_test.dart +++ /dev/null @@ -1,36 +0,0 @@ -@TestOn('vm') -library; // Uses dart:io - -import 'dart:io'; - -import 'package:langchain_openai/langchain_openai.dart'; -import 'package:test/test.dart'; - -void main() { - group('Anyscale AI Embeddings tests', () { - late OpenAIEmbeddings embeddings; - - setUp(() async { - embeddings = OpenAIEmbeddings( - apiKey: Platform.environment['ANYSCALE_API_KEY'], - baseUrl: 'https://api.endpoints.anyscale.com/v1', - ); - }); - - tearDown(() { - embeddings.close(); - }); - - test('Test Anyscale Embeddings models', () async { - final models = [ - 'thenlper/gte-large', - ]; - for (final model in models) { - embeddings.model = model; - final res = await embeddings.embedQuery('Hello world'); - expect(res.length, greaterThan(0)); - await Future.delayed(const Duration(seconds: 1)); // Rate limit - } - }); - }); -} diff --git a/packages/langchain_openai/test/tools/dall_e_test.dart b/packages/langchain_openai/test/tools/dall_e_test.dart index 5a9aba09..7a8a8407 100644 --- a/packages/langchain_openai/test/tools/dall_e_test.dart +++ b/packages/langchain_openai/test/tools/dall_e_test.dart @@ -4,7 +4,7 @@ library; // Uses dart:io import 'dart:io'; -import 'package:langchain/langchain.dart' show AgentExecutor; +import 'package:langchain/langchain.dart' show AgentExecutor, ToolsAgent; import 'package:langchain_community/langchain_community.dart'; import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; @@ -62,7 +62,7 @@ void main() { ), ]; - final agent = OpenAIToolsAgent.fromLLMAndTools( + final agent = ToolsAgent.fromLLMAndTools( llm: llm, tools: tools, ); diff --git a/packages/langchain_pinecone/CHANGELOG.md b/packages/langchain_pinecone/CHANGELOG.md index 276d2616..f616d549 100644 --- a/packages/langchain_pinecone/CHANGELOG.md +++ b/packages/langchain_pinecone/CHANGELOG.md @@ -1,3 +1,27 @@ +📣 Check out the [releases 
page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.0+9 + + - Update a dependency to the latest release. + +## 0.1.0+8 + + - Update a dependency to the latest release. + +## 0.1.0+7 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.1.0+6 + + - Update a dependency to the latest release. + +## 0.1.0+5 + + - Update a dependency to the latest release. + ## 0.1.0+4 - Update a dependency to the latest release. diff --git a/packages/langchain_pinecone/pubspec.yaml b/packages/langchain_pinecone/pubspec.yaml index 141a96f4..82e39fa2 100644 --- a/packages/langchain_pinecone/pubspec.yaml +++ b/packages/langchain_pinecone/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_pinecone description: LangChain.dart integration module for Pinecone fully-managed vector database. -version: 0.1.0+4 +version: 0.1.0+9 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,15 +14,15 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - http: ^1.1.0 - langchain_core: ^0.3.1 + http: ^1.2.2 + langchain_core: 0.3.6 meta: ^1.11.0 pinecone: ^0.7.2 - uuid: ^4.3.3 + uuid: ^4.4.2 dev_dependencies: - test: ^1.25.2 - langchain_openai: ^0.6.1 + test: ^1.25.8 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_pinecone/pubspec_overrides.yaml b/packages/langchain_pinecone/pubspec_overrides.yaml index 8dd8d545..de62cfcc 100644 --- a/packages/langchain_pinecone/pubspec_overrides.yaml +++ b/packages/langchain_pinecone/pubspec_overrides.yaml @@ -1,4 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core +# melos_managed_dependency_overrides: langchain_core,langchain_openai,openai_dart dependency_overrides: langchain_core: path: ../langchain_core diff --git a/packages/langchain_supabase/CHANGELOG.md b/packages/langchain_supabase/CHANGELOG.md index d98b5fe3..bd6956b4 100644 --- a/packages/langchain_supabase/CHANGELOG.md +++ b/packages/langchain_supabase/CHANGELOG.md @@ -1,3 +1,27 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.1+3 + + - Update a dependency to the latest release. + +## 0.1.1+2 + + - Update a dependency to the latest release. + +## 0.1.1+1 + + - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d)) + +## 0.1.1 + + - Update a dependency to the latest release. + +## 0.1.0+5 + + - Update a dependency to the latest release. + ## 0.1.0+4 - Update a dependency to the latest release. 
diff --git a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart index f6d1e11e..0c777f01 100644 --- a/packages/langchain_supabase/lib/src/vector_stores/supabase.dart +++ b/packages/langchain_supabase/lib/src/vector_stores/supabase.dart @@ -54,7 +54,7 @@ import 'package:supabase/supabase.dart'; /// ``` /// /// See documentation for more details: -/// - [LangChain.dart Supabase docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/supabase) +/// - [LangChain.dart Supabase docs](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase) /// - [Supabase Vector docs](https://supabase.com/docs/guides/ai) /// {@endtemplate} class Supabase extends VectorStore { diff --git a/packages/langchain_supabase/pubspec.yaml b/packages/langchain_supabase/pubspec.yaml index c480cdc7..9b5530ad 100644 --- a/packages/langchain_supabase/pubspec.yaml +++ b/packages/langchain_supabase/pubspec.yaml @@ -1,10 +1,10 @@ name: langchain_supabase description: LangChain.dart integration module for Supabase (e.g. Supabase Vector). -version: 0.1.0+4 +version: 0.1.1+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -14,16 +14,16 @@ topics: - vector-db environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - http: ^1.1.0 - langchain_core: ^0.3.1 + http: ^1.2.2 + langchain_core: 0.3.6 meta: ^1.11.0 - supabase: ^2.0.8 + supabase: ^2.2.7 dev_dependencies: - test: ^1.25.2 - langchain: ^0.7.1 - langchain_community: 0.2.0+1 - langchain_openai: ^0.6.1 + test: ^1.25.8 + langchain: ^0.7.6 + langchain_community: 0.3.2 + langchain_openai: ^0.7.2 diff --git a/packages/langchain_supabase/pubspec_overrides.yaml b/packages/langchain_supabase/pubspec_overrides.yaml index 5eb34624..b03ffbc5 100644 --- a/packages/langchain_supabase/pubspec_overrides.yaml +++ b/packages/langchain_supabase/pubspec_overrides.yaml @@ -1,5 +1,4 @@ -# melos_managed_dependency_overrides: langchain_openai,openai_dart,langchain_core,langchain_community -# melos_managed_dependency_overrides: langchain +# melos_managed_dependency_overrides: langchain,langchain_community,langchain_core,langchain_openai,openai_dart,tavily_dart dependency_overrides: langchain: path: ../langchain @@ -11,3 +10,5 @@ dependency_overrides: path: ../langchain_openai openai_dart: path: ../openai_dart + tavily_dart: + path: ../tavily_dart diff --git a/packages/langchain_weaviate/pubspec.yaml b/packages/langchain_weaviate/pubspec.yaml index f5f5de33..3d9b8cd3 100644 --- a/packages/langchain_weaviate/pubspec.yaml +++ b/packages/langchain_weaviate/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_weaviate issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_weaviate homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_wikipedia/pubspec.yaml 
b/packages/langchain_wikipedia/pubspec.yaml index e1377267..2dcc9e5c 100644 --- a/packages/langchain_wikipedia/pubspec.yaml +++ b/packages/langchain_wikipedia/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wikipedia issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wikipedia homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langchain_wolfram/pubspec.yaml b/packages/langchain_wolfram/pubspec.yaml index b64e02a0..14b30014 100644 --- a/packages/langchain_wolfram/pubspec.yaml +++ b/packages/langchain_wolfram/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.0.1-dev.1 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wolfram issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wolfram homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev publish_to: none # Remove when the package is ready to be published topics: @@ -14,4 +14,4 @@ topics: - langchain environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" diff --git a/packages/langgraph/.gitignore b/packages/langgraph/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/langgraph/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/langgraph/CHANGELOG.md b/packages/langgraph/CHANGELOG.md new file mode 100644 index 00000000..90f8e244 --- /dev/null +++ b/packages/langgraph/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap package. diff --git a/packages/langgraph/LICENSE b/packages/langgraph/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/langgraph/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/packages/langgraph/README.md b/packages/langgraph/README.md
new file mode 100644
index 00000000..70fc2aae
--- /dev/null
+++ b/packages/langgraph/README.md
@@ -0,0 +1,17 @@
+# 🦜🕸️LangGraph
+
+[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)
+[![langgraph](https://img.shields.io/pub/v/langgraph.svg)](https://pub.dev/packages/langgraph)
+[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR)
+[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)
+
+⚡ Building language agents as graphs ⚡
+
+## Overview
+
+TODO
+
+## License
+
+LangChain.dart is licensed under the
+[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).
diff --git a/packages/langgraph/analysis_options.yaml b/packages/langgraph/analysis_options.yaml
new file mode 100644
index 00000000..f04c6cf0
--- /dev/null
+++ b/packages/langgraph/analysis_options.yaml
@@ -0,0 +1 @@
+include: ../../analysis_options.yaml
diff --git a/packages/langgraph/example/langgraph_example.dart b/packages/langgraph/example/langgraph_example.dart
new file mode 100644
index 00000000..21f3e9f2
--- /dev/null
+++ b/packages/langgraph/example/langgraph_example.dart
@@ -0,0 +1,3 @@
+void main() {
+  // TODO
+}
diff --git a/packages/langgraph/lib/langgraph.dart b/packages/langgraph/lib/langgraph.dart
new file mode 100644
index 00000000..790b457d
--- /dev/null
+++ b/packages/langgraph/lib/langgraph.dart
@@ -0,0 +1,2 @@
+/// Build resilient language agents as graphs.
+library;
diff --git a/packages/langgraph/pubspec.yaml b/packages/langgraph/pubspec.yaml
new file mode 100644
index 00000000..e6ef9c18
--- /dev/null
+++ b/packages/langgraph/pubspec.yaml
@@ -0,0 +1,16 @@
+name: langgraph
+description: Build resilient language agents as graphs.
+version: 0.0.1-dev.1
+repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langgraph
+issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langgraph
+homepage: https://github.com/davidmigloz/langchain_dart
+documentation: https://langchaindart.dev
+
+topics:
+  - ai
+  - nlp
+  - llms
+  - langchain
+
+environment:
+  sdk: ">=3.4.0 <4.0.0"
diff --git a/packages/mistralai_dart/CHANGELOG.md b/packages/mistralai_dart/CHANGELOG.md
index fcb706a7..ec5979cc 100644
--- a/packages/mistralai_dart/CHANGELOG.md
+++ b/packages/mistralai_dart/CHANGELOG.md
@@ -1,3 +1,15 @@
+📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details.
+
+---
+
+## 0.0.3+3
+
+ - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))
+
+## 0.0.3+2
+
+ - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678))
+
 ## 0.0.3+1
 
 - **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)).
([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635)) diff --git a/packages/mistralai_dart/lib/mistralai_dart.dart b/packages/mistralai_dart/lib/mistralai_dart.dart index 31efab90..05cfac61 100644 --- a/packages/mistralai_dart/lib/mistralai_dart.dart +++ b/packages/mistralai_dart/lib/mistralai_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -library mistralai_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show MistralAIClientException; diff --git a/packages/mistralai_dart/lib/src/http_client/http_client.dart b/packages/mistralai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/mistralai_dart/lib/src/http_client/http_client.dart +++ b/packages/mistralai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/mistralai_dart/pubspec.yaml b/packages/mistralai_dart/pubspec.yaml index a7aa8347..406b7170 100644 --- a/packages/mistralai_dart/pubspec.yaml +++ b/packages/mistralai_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: mistralai_dart description: Dart Client for the Mistral AI API (Mistral-7B, Mixtral 8x7B, embeddings, etc.). -version: 0.0.3+1 +version: 0.0.3+3 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/mistralai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:mistralai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -13,22 +13,22 @@ topics: - mistral environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/ollama_dart/CHANGELOG.md b/packages/ollama_dart/CHANGELOG.md index 4b5ff033..d6f79865 100644 --- a/packages/ollama_dart/CHANGELOG.md +++ b/packages/ollama_dart/CHANGELOG.md @@ -1,3 +1,40 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.2.2 + + - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768)) + +## 0.2.1 + + - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). 
([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7))
+
+## 0.2.0
+
+> Note: This release has breaking changes.
+
+ - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975))
+ - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c))
+ - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). ([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8))
+ - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490))
+ - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41))
+ - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f))
+
+## 0.1.2
+
+ - **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9))
+ - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))
+
+## 0.1.1
+
+ - **FEAT**: Support buffered stream responses in ollama_dart ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). ([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd))
+ - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678))
+
+## 0.1.0+1
+
+ - **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d))
 ## 0.1.0
 
 > Note: This release has breaking changes.
diff --git a/packages/ollama_dart/README.md b/packages/ollama_dart/README.md
index 0aaf5a97..dc637664 100644
--- a/packages/ollama_dart/README.md
+++ b/packages/ollama_dart/README.md
@@ -17,24 +17,33 @@ Unofficial Dart client for [Ollama](https://ollama.ai/) API.
 **Supported endpoints:**
 
 - Completions (with streaming support)
-- Chat completions
+- Chat completions (with streaming and tool calling support)
 - Embeddings
 - Models
 - Blobs
+- Version
 
 ## Table of contents
 
 - [Usage](#usage)
   * [Completions](#completions)
+    + [Generate completion](#generate-completion)
+    + [Stream completion](#stream-completion)
   * [Chat completions](#chat-completions)
+    + [Generate chat completion](#generate-chat-completion)
+    + [Stream chat completion](#stream-chat-completion)
+    + [Tool calling](#tool-calling)
   * [Embeddings](#embeddings)
+    + [Generate embedding](#generate-embedding)
   * [Models](#models)
     + [Create model](#create-model)
     + [List models](#list-models)
+    + [List running models](#list-running-models)
     + [Show Model Information](#show-model-information)
     + [Pull a Model](#pull-a-model)
     + [Push a Model](#push-a-model)
     + [Check if a Blob Exists](#check-if-a-blob-exists)
+  * [Version](#version)
 - [Advance Usage](#advance-usage)
   * [Default HTTP client](#default-http-client)
   * [Custom HTTP client ](#custom-http-client)
@@ -52,7 +61,7 @@ Refer to the [documentation](https://github.com/jmorganca/ollama/blob/main/docs/
 
 Given a prompt, the model will generate a response.
 
-**Generate completion:**
+#### Generate completion
 
 ```dart
 final generated = await client.generateCompletion(
@@ -65,7 +74,7 @@ print(generated.response);
 // The sky appears blue because of a phenomenon called Rayleigh scattering...
 ```
 
-**Stream completion:**
+#### Stream completion
 
 ```dart
 final stream = client.generateCompletionStream(
@@ -86,7 +95,7 @@ print(text);
 
 Given a prompt, the model will generate a response in a chat format.
 
-**Generate chat completion:**
+#### Generate chat completion
 
 ```dart
 final res = await client.generateChatCompletion(
@@ -109,7 +118,7 @@ print(res);
 // Message(role: MessageRole.assistant, content: 123456789)
 ```
 
-**Stream chat completion:**
+#### Stream chat completion
 
 ```dart
 final stream = client.generateChatCompletionStream(
@@ -137,11 +146,91 @@ print(text);
 // 123456789
 ```
 
+#### Tool calling
+
+Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema, that you can then use to call the tools in your code and return the result back to the model to complete the conversation.
+
+**Notes:**
+- Tool calling requires Ollama 0.2.8 or newer.
+- Streaming tool calls is not supported at the moment.
+- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2)).
+
+```dart
+const tool = Tool(
+  function: ToolFunction(
+    name: 'get_current_weather',
+    description: 'Get the current weather in a given location',
+    parameters: {
+      'type': 'object',
+      'properties': {
+        'location': {
+          'type': 'string',
+          'description': 'The city and country, e.g. San Francisco, US',
+        },
+        'unit': {
+          'type': 'string',
+          'description': 'The unit of temperature to return',
+          'enum': ['celsius', 'fahrenheit'],
+        },
+      },
+      'required': ['location'],
+    },
+  ),
+);
+
+const userMsg = Message(
+  role: MessageRole.user,
+  content: 'What’s the weather like in Barcelona in celsius?',
+);
+
+final res1 = await client.generateChatCompletion(
+  request: GenerateChatCompletionRequest(
+    model: 'llama3.2',
+    messages: [userMsg],
+    tools: [tool],
+  ),
+);
+
+print(res1.message.toolCalls);
+// [
+//   ToolCall(
+//     function:
+//       ToolCallFunction(
+//         name: get_current_weather,
+//         arguments: {
+//           location: Barcelona, ES,
+//           unit: celsius
+//         }
+//       )
+//   )
+// ]
+
+// Call your tool here. For this example, we'll just mock the response.
+const toolResult = '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}';
+
+// Submit the response of the tool call to the model
+final res2 = await client.generateChatCompletion(
+  request: GenerateChatCompletionRequest(
+    model: 'llama3.2',
+    messages: [
+      userMsg,
+      res1.message,
+      Message(
+        role: MessageRole.tool,
+        content: toolResult,
+      ),
+    ],
+  ),
+);
+print(res2.message.content);
+// The current weather in Barcelona is 20°C.
+```
+
 ### Embeddings
 
 Given a prompt, the model will generate an embedding representing the prompt.
 
-**Generate embedding:**
+#### Generate embedding
 
 ```dart
 final generated = await client.generateEmbedding(
@@ -192,6 +281,15 @@ final res = await client.listModels();
 print(res.models);
 ```
 
+#### List running models
+
+Lists models currently loaded and their memory footprint.
+
+```dart
+final res = await client.listRunningModels();
+print(res.models);
+```
+
 #### Show Model Information
 
 Show details about a model including modelfile, template, parameters, license, and system prompt.
@@ -251,16 +349,25 @@ await for (final res in stream) {
 
 #### Check if a Blob Exists
 
-Check if a blob is known to the server.
+Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not Ollama.ai.
 
 ```dart
 await client.checkBlob(
-  name: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2',
+  digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2',
 );
 ```
 
 If the blob doesn't exist, an `OllamaClientException` exception will be thrown.
 
+### Version
+
+Get the version of the Ollama server.
+ +```dart +final res = await client.getVersion(); +print(res.version); +``` + ## Advance Usage ### Default HTTP client diff --git a/packages/ollama_dart/example/ollama_dart_example.dart b/packages/ollama_dart/example/ollama_dart_example.dart index 15ef53d9..53dc2abf 100644 --- a/packages/ollama_dart/example/ollama_dart_example.dart +++ b/packages/ollama_dart/example/ollama_dart_example.dart @@ -11,6 +11,7 @@ Future main() async { await _generateChatCompletion(client); await _generateChatCompletionWithHistory(client); await _generateChatCompletionStream(client); + await _generateChatToolCalling(client); // Embeddings await _generateEmbedding(client); @@ -69,7 +70,7 @@ Future _generateCompletionStream(final OllamaClient client) async { Future _generateChatCompletion(final OllamaClient client) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.2', messages: [ Message( role: MessageRole.system, @@ -86,7 +87,7 @@ Future _generateChatCompletion(final OllamaClient client) async { ], ), ); - print(generated.message?.content); + print(generated.message.content); } Future _generateChatCompletionWithHistory( @@ -94,7 +95,7 @@ Future _generateChatCompletionWithHistory( ) async { final generated = await client.generateChatCompletion( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.2', messages: [ Message( role: MessageRole.user, @@ -111,13 +112,13 @@ Future _generateChatCompletionWithHistory( ], ), ); - print(generated.message?.content); + print(generated.message.content); } Future _generateChatCompletionStream(final OllamaClient client) async { final stream = client.generateChatCompletionStream( request: const GenerateChatCompletionRequest( - model: 'llama3:latest', + model: 'llama3.2', messages: [ Message( role: MessageRole.system, @@ -132,11 +133,84 @@ Future _generateChatCompletionStream(final OllamaClient client) async { ); String text = ''; await for (final res in stream) { - text += (res.message?.content ?? '').trim(); + text += res.message.content.trim(); } print(text); } +Future _generateChatToolCalling(final OllamaClient client) async { + const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), + ); + + const userMsg = Message( + role: MessageRole.user, + content: 'What’s the weather like in Barcelona in celsius?', + ); + + final res1 = await client.generateChatCompletion( + request: const GenerateChatCompletionRequest( + model: 'llama3.2', + messages: [userMsg], + tools: [tool], + keepAlive: 1, + ), + ); + + print(res1.message.toolCalls); + // [ + // ToolCall( + // function: + // ToolCallFunction( + // name: get_current_weather, + // arguments: { + // location: Barcelona, ES, + // unit: celsius + // } + // ) + // ) + // ] + + // Call your tool here. For this example, we'll just mock the response. 
+ const toolResult = + '{"location": "Barcelona, ES", "temperature": 20, "unit": "celsius"}'; + + // Submit the response of the tool call to the model + final res2 = await client.generateChatCompletion( + request: GenerateChatCompletionRequest( + model: 'llama3.2', + messages: [ + userMsg, + res1.message, + const Message( + role: MessageRole.tool, + content: toolResult, + ), + ], + ), + ); + print(res2.message.content); + // The current weather in Barcelona is 20°C. +} + Future _generateEmbedding(final OllamaClient client) async { final generated = await client.generateEmbedding( request: const GenerateEmbeddingRequest( @@ -217,7 +291,7 @@ Future _pushModelStream(final OllamaClient client) async { Future _checkBlob(final OllamaClient client) async { await client.checkBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); } diff --git a/packages/ollama_dart/lib/ollama_dart.dart b/packages/ollama_dart/lib/ollama_dart.dart index a62c32c4..1195c10c 100644 --- a/packages/ollama_dart/lib/ollama_dart.dart +++ b/packages/ollama_dart/lib/ollama_dart.dart @@ -1,5 +1,5 @@ /// Dart Client for the Ollama API (run Llama 3, Code Llama, and other models locally). -library ollama_dart; +library; export 'src/client.dart'; export 'src/generated/client.dart' show OllamaClientException; diff --git a/packages/ollama_dart/lib/src/client.dart b/packages/ollama_dart/lib/src/client.dart index 2bb5a7be..c5dded40 100644 --- a/packages/ollama_dart/lib/src/client.dart +++ b/packages/ollama_dart/lib/src/client.dart @@ -1,4 +1,5 @@ // ignore_for_file: use_super_parameters +import 'dart:async'; import 'dart:convert'; import 'package:http/http.dart' as http; @@ -56,11 +57,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => GenerateCompletionResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => GenerateCompletionResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -85,11 +84,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => GenerateChatCompletionResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => GenerateChatCompletionResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -114,11 +111,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => CreateModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => CreateModelResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -143,11 +138,9 @@ class OllamaClient extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => PullModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => PullModelResponse.fromJson(json.decode(d))); } // ------------------------------------------ @@ -172,11 +165,9 @@ class OllamaClient 
extends g.OllamaClient { responseType: 'application/x-ndjson', body: request.copyWith(stream: true), ); - yield* r.stream.map( - (final d) => PushModelResponse.fromJson( - json.decode(const Utf8Decoder().convert(d)), - ), - ); + yield* r.stream + .transform(const _OllamaStreamTransformer()) // + .map((d) => PushModelResponse.fromJson(json.decode(d))); } @override @@ -184,3 +175,15 @@ class OllamaClient extends g.OllamaClient { return onRequestHandler(request); } } + +class _OllamaStreamTransformer + extends StreamTransformerBase, String> { + const _OllamaStreamTransformer(); + + @override + Stream bind(final Stream> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()); + } +} diff --git a/packages/ollama_dart/lib/src/generated/client.dart b/packages/ollama_dart/lib/src/generated/client.dart index 3ab44797..0a530915 100644 --- a/packages/ollama_dart/lib/src/generated/client.dart +++ b/packages/ollama_dart/lib/src/generated/client.dart @@ -356,6 +356,27 @@ class OllamaClient { ); } + // ------------------------------------------ + // METHOD: getVersion + // ------------------------------------------ + + /// Returns the version of the Ollama server. + /// + /// This endpoint returns the version of the Ollama server. + /// + /// `GET` `http://localhost:11434/api/version` + Future getVersion() async { + final r = await makeRequest( + baseUrl: 'http://localhost:11434/api', + path: '/version', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return VersionResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: generateCompletion // ------------------------------------------ @@ -477,6 +498,25 @@ class OllamaClient { return ModelsResponse.fromJson(_jsonDecode(r)); } + // ------------------------------------------ + // METHOD: listRunningModels + // ------------------------------------------ + + /// List models that are running. + /// + /// `GET` `http://localhost:11434/api/ps` + Future listRunningModels() async { + final r = await makeRequest( + baseUrl: 'http://localhost:11434/api', + path: '/ps', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return ProcessResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: showModelInfo // ------------------------------------------ @@ -567,7 +607,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/json', + responseType: 'application/x-ndjson', body: request, ); return PullModelResponse.fromJson(_jsonDecode(r)); @@ -593,7 +633,7 @@ class OllamaClient { method: HttpMethod.post, isMultipart: false, requestType: 'application/json', - responseType: 'application/json', + responseType: 'application/x-ndjson', body: request, ); return PushModelResponse.fromJson(_jsonDecode(r)); @@ -605,26 +645,23 @@ class OllamaClient { /// Create a blob from a file. Returns the server file path. /// - /// `name`: the SHA256 digest of the blob + /// `digest`: the SHA256 digest of the blob /// /// `request`: No description /// /// `POST` `http://localhost:11434/api/blobs/{digest}` Future createBlob({ - required String name, + required String digest, String? 
request, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/{digest}', + path: '/blobs/$digest', method: HttpMethod.post, isMultipart: false, requestType: 'application/octet-stream', responseType: '', body: request, - queryParams: { - 'name': name, - }, ); } @@ -632,24 +669,23 @@ class OllamaClient { // METHOD: checkBlob // ------------------------------------------ - /// Check to see if a blob exists on the Ollama server which is useful when creating models. + /// Ensures that the file blob used for a FROM or ADAPTER field exists on the server. + /// + /// This is checking your Ollama server and not Ollama.ai. /// - /// `name`: the SHA256 digest of the blob + /// `digest`: the SHA256 digest of the blob /// /// `HEAD` `http://localhost:11434/api/blobs/{digest}` Future checkBlob({ - required String name, + required String digest, }) async { final _ = await makeRequest( baseUrl: 'http://localhost:11434/api', - path: '/blobs/{digest}', + path: '/blobs/$digest', method: HttpMethod.head, isMultipart: false, requestType: '', responseType: '', - queryParams: { - 'name': name, - }, ); } } diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart index 491efa66..fe47da47 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_request.dart @@ -47,6 +47,9 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { /// - If set to 0, the model will be unloaded immediately once finished. /// - If not set, the model will stay loaded for 5 minutes by default @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) List? tools, }) = _GenerateChatCompletionRequest; /// Object construction from a JSON representation @@ -60,7 +63,8 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'format', 'options', 'stream', - 'keep_alive' + 'keep_alive', + 'tools' ]; /// Perform validations on the schema property values @@ -77,6 +81,7 @@ class GenerateChatCompletionRequest with _$GenerateChatCompletionRequest { 'options': options, 'stream': stream, 'keep_alive': keepAlive, + 'tools': tools, }; } } diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart index faf7462a..d7857fd4 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_chat_completion_response.dart @@ -16,18 +16,18 @@ class GenerateChatCompletionResponse with _$GenerateChatCompletionResponse { /// Factory constructor for GenerateChatCompletionResponse const factory GenerateChatCompletionResponse({ /// A message in the chat endpoint - @JsonKey(includeIfNull: false) Message? message, + required Message message, /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) String? model, + required String model, /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) String? 
createdAt, + @JsonKey(name: 'created_at') required String createdAt, /// Whether the response has completed. - @JsonKey(includeIfNull: false) bool? done, + required bool done, /// Reason why the model is done generating a response. @JsonKey( diff --git a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart index 1368ac7a..014e2654 100644 --- a/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart +++ b/packages/ollama_dart/lib/src/generated/schema/generate_completion_request.dart @@ -23,6 +23,9 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { /// The prompt to generate a response. required String prompt, + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) String? suffix, + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? images, @@ -74,6 +77,7 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { static const List propertyNames = [ 'model', 'prompt', + 'suffix', 'images', 'system', 'template', @@ -95,6 +99,7 @@ class GenerateCompletionRequest with _$GenerateCompletionRequest { return { 'model': model, 'prompt': prompt, + 'suffix': suffix, 'images': images, 'system': system, 'template': template, diff --git a/packages/ollama_dart/lib/src/generated/schema/message.dart b/packages/ollama_dart/lib/src/generated/schema/message.dart index 362e2349..add48dc2 100644 --- a/packages/ollama_dart/lib/src/generated/schema/message.dart +++ b/packages/ollama_dart/lib/src/generated/schema/message.dart @@ -23,6 +23,10 @@ class Message with _$Message { /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? images, + + /// A list of tools the model wants to call. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, }) = _Message; /// Object construction from a JSON representation @@ -30,7 +34,12 @@ class Message with _$Message { _$MessageFromJson(json); /// List of all property names of schema - static const List propertyNames = ['role', 'content', 'images']; + static const List propertyNames = [ + 'role', + 'content', + 'images', + 'tool_calls' + ]; /// Perform validations on the schema property values String? validateSchema() { @@ -43,6 +52,7 @@ class Message with _$Message { 'role': role, 'content': content, 'images': images, + 'tool_calls': toolCalls, }; } } @@ -59,4 +69,6 @@ enum MessageRole { user, @JsonValue('assistant') assistant, + @JsonValue('tool') + tool, } diff --git a/packages/ollama_dart/lib/src/generated/schema/model_info.dart b/packages/ollama_dart/lib/src/generated/schema/model_info.dart index cb212131..30c2a949 100644 --- a/packages/ollama_dart/lib/src/generated/schema/model_info.dart +++ b/packages/ollama_dart/lib/src/generated/schema/model_info.dart @@ -33,6 +33,10 @@ class ModelInfo with _$ModelInfo { /// Details about a model. @JsonKey(includeIfNull: false) ModelDetails? details, + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, + /// The default messages for the model. @JsonKey(includeIfNull: false) List? 
messages, }) = _ModelInfo; @@ -49,6 +53,7 @@ class ModelInfo with _$ModelInfo { 'template', 'system', 'details', + 'model_info', 'messages' ]; @@ -66,6 +71,7 @@ class ModelInfo with _$ModelInfo { 'template': template, 'system': system, 'details': details, + 'model_info': modelInfo, 'messages': messages, }; } diff --git a/packages/ollama_dart/lib/src/generated/schema/model_information.dart b/packages/ollama_dart/lib/src/generated/schema/model_information.dart new file mode 100644 index 00000000..d10848f8 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/model_information.dart @@ -0,0 +1,61 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ModelInformation +// ========================================== + +/// Details about a model. +@freezed +class ModelInformation with _$ModelInformation { + const ModelInformation._(); + + /// Factory constructor for ModelInformation + const factory ModelInformation({ + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion, + }) = _ModelInformation; + + /// Object construction from a JSON representation + factory ModelInformation.fromJson(Map json) => + _$ModelInformationFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'general.architecture', + 'general.file_type', + 'general.parameter_count', + 'general.quantization_version' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'general.architecture': generalArchitecture, + 'general.file_type': generalFileType, + 'general.parameter_count': generalParameterCount, + 'general.quantization_version': generalQuantizationVersion, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/process_model.dart b/packages/ollama_dart/lib/src/generated/schema/process_model.dart new file mode 100644 index 00000000..dad453f0 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/process_model.dart @@ -0,0 +1,69 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ProcessModel +// ========================================== + +/// A model that is currently loaded. +@freezed +class ProcessModel with _$ProcessModel { + const ProcessModel._(); + + /// Factory constructor for ProcessModel + const factory ProcessModel({ + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) String? model, + + /// Size of the model on disk. 
+ @JsonKey(includeIfNull: false) int? size, + + /// The model's digest. + @JsonKey(includeIfNull: false) String? digest, + + /// Details about a model. + @JsonKey(includeIfNull: false) ModelDetails? details, + + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram, + }) = _ProcessModel; + + /// Object construction from a JSON representation + factory ProcessModel.fromJson(Map json) => + _$ProcessModelFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'size', + 'digest', + 'details', + 'expires_at', + 'size_vram' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'size': size, + 'digest': digest, + 'details': details, + 'expires_at': expiresAt, + 'size_vram': sizeVram, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/process_response.dart b/packages/ollama_dart/lib/src/generated/schema/process_response.dart new file mode 100644 index 00000000..6261a813 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/process_response.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ProcessResponse +// ========================================== + +/// Response class for the list running models endpoint. +@freezed +class ProcessResponse with _$ProcessResponse { + const ProcessResponse._(); + + /// Factory constructor for ProcessResponse + const factory ProcessResponse({ + /// List of running models. + @JsonKey(includeIfNull: false) List? models, + }) = _ProcessResponse; + + /// Object construction from a JSON representation + factory ProcessResponse.fromJson(Map json) => + _$ProcessResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['models']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'models': models, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart index bdfb3574..d3bb5142 100644 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart +++ b/packages/ollama_dart/lib/src/generated/schema/push_model_response.dart @@ -16,11 +16,7 @@ class PushModelResponse with _$PushModelResponse { /// Factory constructor for PushModelResponse const factory PushModelResponse({ /// Status pushing the model. - @JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue, - ) - PushModelStatus? status, + @JsonKey(includeIfNull: false) String? status, /// the model's digest @JsonKey(includeIfNull: false) String? 
digest, diff --git a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart b/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart deleted file mode 100644 index c043c843..00000000 --- a/packages/ollama_dart/lib/src/generated/schema/push_model_status.dart +++ /dev/null @@ -1,21 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of ollama_schema; - -// ========================================== -// ENUM: PushModelStatus -// ========================================== - -/// Status pushing the model. -enum PushModelStatus { - @JsonValue('retrieving manifest') - retrievingManifest, - @JsonValue('starting upload') - startingUpload, - @JsonValue('pushing manifest') - pushingManifest, - @JsonValue('success') - success, -} diff --git a/packages/ollama_dart/lib/src/generated/schema/request_options.dart b/packages/ollama_dart/lib/src/generated/schema/request_options.dart index a83df364..940d09d4 100644 --- a/packages/ollama_dart/lib/src/generated/schema/request_options.dart +++ b/packages/ollama_dart/lib/src/generated/schema/request_options.dart @@ -18,68 +18,90 @@ class RequestOptions with _$RequestOptions { /// Number of tokens to keep from the prompt. @JsonKey(name: 'num_keep', includeIfNull: false) int? numKeep, - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? seed, - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? topK, - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? topP, - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. 
(Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, + + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? temperature, - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? repeatPenalty, - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? frequencyPenalty, - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? mirostat, - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? mirostatTau, - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? mirostatEta, - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? penalizeNewline, - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? stop, /// Enable NUMA support. (Default: false) @JsonKey(includeIfNull: false) bool? numa, - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? numCtx, - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? numBatch, - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? numGpu, /// The GPU to use for the main model. Default is 0. @@ -88,7 +110,7 @@ class RequestOptions with _$RequestOptions { /// Enable low VRAM mode. (Default: false) @JsonKey(name: 'low_vram', includeIfNull: false) bool? lowVram, - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? f16Kv, /// Enable logits all. (Default: false) @@ -103,7 +125,9 @@ class RequestOptions with _$RequestOptions { /// Enable mlock. (Default: false) @JsonKey(name: 'use_mlock', includeIfNull: false) bool? useMlock, - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? 
numThread, }) = _RequestOptions; @@ -118,6 +142,7 @@ class RequestOptions with _$RequestOptions { 'num_predict', 'top_k', 'top_p', + 'min_p', 'tfs_z', 'typical_p', 'repeat_last_n', @@ -157,6 +182,7 @@ class RequestOptions with _$RequestOptions { 'num_predict': numPredict, 'top_k': topK, 'top_p': topP, + 'min_p': minP, 'tfs_z': tfsZ, 'typical_p': typicalP, 'repeat_last_n': repeatLastN, diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.dart b/packages/ollama_dart/lib/src/generated/schema/schema.dart index 5c8eb964..f951912a 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.dart @@ -13,11 +13,18 @@ part 'schema.freezed.dart'; part 'generate_completion_request.dart'; part 'request_options.dart'; part 'response_format.dart'; +part 'version_response.dart'; part 'generate_completion_response.dart'; part 'generate_chat_completion_request.dart'; part 'generate_chat_completion_response.dart'; part 'done_reason.dart'; part 'message.dart'; +part 'tool.dart'; +part 'tool_function.dart'; +part 'tool_function_params.dart'; +part 'tool_call.dart'; +part 'tool_call_function.dart'; +part 'tool_call_function_args.dart'; part 'generate_embedding_request.dart'; part 'generate_embedding_response.dart'; part 'create_model_request.dart'; @@ -26,6 +33,9 @@ part 'create_model_status.dart'; part 'models_response.dart'; part 'model.dart'; part 'model_details.dart'; +part 'model_information.dart'; +part 'process_response.dart'; +part 'process_model.dart'; part 'model_info_request.dart'; part 'model_info.dart'; part 'copy_model_request.dart'; @@ -35,4 +45,3 @@ part 'pull_model_response.dart'; part 'pull_model_status.dart'; part 'push_model_request.dart'; part 'push_model_response.dart'; -part 'push_model_status.dart'; diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart index ab02ac2b..1c77269c 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.freezed.dart @@ -29,6 +29,10 @@ mixin _$GenerateCompletionRequest { /// The prompt to generate a response. String get prompt => throw _privateConstructorUsedError; + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) + String? get suffix => throw _privateConstructorUsedError; + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; @@ -91,6 +95,7 @@ abstract class $GenerateCompletionRequestCopyWith<$Res> { $Res call( {String model, String prompt, + @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -123,6 +128,7 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, $Res call({ Object? model = null, Object? prompt = null, + Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -142,6 +148,10 @@ class _$GenerateCompletionRequestCopyWithImpl<$Res, ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, images: freezed == images ? 
_value.images : images // ignore: cast_nullable_to_non_nullable @@ -206,6 +216,7 @@ abstract class _$$GenerateCompletionRequestImplCopyWith<$Res> $Res call( {String model, String prompt, + @JsonKey(includeIfNull: false) String? suffix, @JsonKey(includeIfNull: false) List? images, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) String? template, @@ -238,6 +249,7 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> $Res call({ Object? model = null, Object? prompt = null, + Object? suffix = freezed, Object? images = freezed, Object? system = freezed, Object? template = freezed, @@ -257,6 +269,10 @@ class __$$GenerateCompletionRequestImplCopyWithImpl<$Res> ? _value.prompt : prompt // ignore: cast_nullable_to_non_nullable as String, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, images: freezed == images ? _value._images : images // ignore: cast_nullable_to_non_nullable @@ -303,6 +319,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { const _$GenerateCompletionRequestImpl( {required this.model, required this.prompt, + @JsonKey(includeIfNull: false) this.suffix, @JsonKey(includeIfNull: false) final List? images, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.template, @@ -332,6 +349,11 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override final String prompt; + /// The text that comes after the inserted text. + @override + @JsonKey(includeIfNull: false) + final String? suffix; + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) final List? _images; @@ -409,7 +431,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { @override String toString() { - return 'GenerateCompletionRequest(model: $model, prompt: $prompt, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateCompletionRequest(model: $model, prompt: $prompt, suffix: $suffix, images: $images, system: $system, template: $template, context: $context, options: $options, format: $format, raw: $raw, stream: $stream, keepAlive: $keepAlive)'; } @override @@ -419,6 +441,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { other is _$GenerateCompletionRequestImpl && (identical(other.model, model) || other.model == model) && (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.suffix, suffix) || other.suffix == suffix) && const DeepCollectionEquality().equals(other._images, _images) && (identical(other.system, system) || other.system == system) && (identical(other.template, template) || @@ -438,6 +461,7 @@ class _$GenerateCompletionRequestImpl extends _GenerateCompletionRequest { runtimeType, model, prompt, + suffix, const DeepCollectionEquality().hash(_images), system, template, @@ -467,6 +491,7 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { const factory _GenerateCompletionRequest( {required final String model, required final String prompt, + @JsonKey(includeIfNull: false) final String? suffix, @JsonKey(includeIfNull: false) final List? images, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final String? 
template, @@ -497,6 +522,11 @@ abstract class _GenerateCompletionRequest extends GenerateCompletionRequest { String get prompt; @override + /// The text that comes after the inserted text. + @JsonKey(includeIfNull: false) + String? get suffix; + @override + /// (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @JsonKey(includeIfNull: false) List? get images; @@ -567,67 +597,89 @@ mixin _$RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) int? get numKeep => throw _privateConstructorUsedError; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict => throw _privateConstructorUsedError; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK => throw _privateConstructorUsedError; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) + double? get minP => throw _privateConstructorUsedError; + + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ => throw _privateConstructorUsedError; - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. 
(default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP => throw _privateConstructorUsedError; - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN => throw _privateConstructorUsedError; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? get repeatPenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty => throw _privateConstructorUsedError; - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat => throw _privateConstructorUsedError; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau => throw _privateConstructorUsedError; - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? 
get mirostatEta => throw _privateConstructorUsedError; - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline => throw _privateConstructorUsedError; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? get stop => throw _privateConstructorUsedError; @@ -635,15 +687,16 @@ mixin _$RequestOptions { @JsonKey(includeIfNull: false) bool? get numa => throw _privateConstructorUsedError; - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx => throw _privateConstructorUsedError; - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch => throw _privateConstructorUsedError; - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? get numGpu => throw _privateConstructorUsedError; @@ -655,7 +708,7 @@ mixin _$RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) bool? get lowVram => throw _privateConstructorUsedError; - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv => throw _privateConstructorUsedError; @@ -675,7 +728,9 @@ mixin _$RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) bool? get useMlock => throw _privateConstructorUsedError; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread => throw _privateConstructorUsedError; @@ -697,6 +752,7 @@ abstract class $RequestOptionsCopyWith<$Res> { @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -745,6 +801,7 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, + Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? 
repeatLastN = freezed, @@ -791,6 +848,10 @@ class _$RequestOptionsCopyWithImpl<$Res, $Val extends RequestOptions> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, + minP: freezed == minP + ? _value.minP + : minP // ignore: cast_nullable_to_non_nullable + as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -905,6 +966,7 @@ abstract class _$$RequestOptionsImplCopyWith<$Res> @JsonKey(name: 'num_predict', includeIfNull: false) int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) int? topK, @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? repeatLastN, @@ -951,6 +1013,7 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> Object? numPredict = freezed, Object? topK = freezed, Object? topP = freezed, + Object? minP = freezed, Object? tfsZ = freezed, Object? typicalP = freezed, Object? repeatLastN = freezed, @@ -997,6 +1060,10 @@ class __$$RequestOptionsImplCopyWithImpl<$Res> ? _value.topP : topP // ignore: cast_nullable_to_non_nullable as double?, + minP: freezed == minP + ? _value.minP + : minP // ignore: cast_nullable_to_non_nullable + as double?, tfsZ: freezed == tfsZ ? _value.tfsZ : tfsZ // ignore: cast_nullable_to_non_nullable @@ -1106,6 +1173,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) this.numPredict, @JsonKey(name: 'top_k', includeIfNull: false) this.topK, @JsonKey(name: 'top_p', includeIfNull: false) this.topP, + @JsonKey(name: 'min_p', includeIfNull: false) this.minP, @JsonKey(name: 'tfs_z', includeIfNull: false) this.tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) this.typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) this.repeatLastN, @@ -1144,85 +1212,109 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'num_keep', includeIfNull: false) final int? numKeep; - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @override @JsonKey(includeIfNull: false) final int? seed; - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @override @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict; - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @override @JsonKey(name: 'top_k', includeIfNull: false) final int? topK; - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top_k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @override + @JsonKey(name: 'min_p', includeIfNull: false) + final double? minP; + + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @override @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ; - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @override @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP; - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @override @JsonKey(name: 'repeat_last_n', includeIfNull: false) final int? repeatLastN; - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @override @JsonKey(includeIfNull: false) final double? temperature; - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @override @JsonKey(name: 'repeat_penalty', includeIfNull: false) final double? repeatPenalty; - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @override @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) final double? frequencyPenalty; - /// Enable Mirostat sampling for controlling perplexity. 
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @override @JsonKey(includeIfNull: false) final int? mirostat; - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @override @JsonKey(name: 'mirostat_tau', includeIfNull: false) final double? mirostatTau; - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @override @JsonKey(name: 'mirostat_eta', includeIfNull: false) final double? mirostatEta; - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @override @JsonKey(name: 'penalize_newline', includeIfNull: false) final bool? penalizeNewline; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. final List? _stop; - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @override @JsonKey(includeIfNull: false) List? get stop { @@ -1238,17 +1330,18 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(includeIfNull: false) final bool? numa; - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @override @JsonKey(name: 'num_ctx', includeIfNull: false) final int? numCtx; - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @override @JsonKey(name: 'num_batch', includeIfNull: false) final int? numBatch; - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @override @JsonKey(name: 'num_gpu', includeIfNull: false) final int? numGpu; @@ -1263,7 +1356,7 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'low_vram', includeIfNull: false) final bool? lowVram; - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @override @JsonKey(name: 'f16_kv', includeIfNull: false) final bool? f16Kv; @@ -1288,14 +1381,16 @@ class _$RequestOptionsImpl extends _RequestOptions { @JsonKey(name: 'use_mlock', includeIfNull: false) final bool? useMlock; - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. 
It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @override @JsonKey(name: 'num_thread', includeIfNull: false) final int? numThread; @override String toString() { - return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; + return 'RequestOptions(numKeep: $numKeep, seed: $seed, numPredict: $numPredict, topK: $topK, topP: $topP, minP: $minP, tfsZ: $tfsZ, typicalP: $typicalP, repeatLastN: $repeatLastN, temperature: $temperature, repeatPenalty: $repeatPenalty, presencePenalty: $presencePenalty, frequencyPenalty: $frequencyPenalty, mirostat: $mirostat, mirostatTau: $mirostatTau, mirostatEta: $mirostatEta, penalizeNewline: $penalizeNewline, stop: $stop, numa: $numa, numCtx: $numCtx, numBatch: $numBatch, numGpu: $numGpu, mainGpu: $mainGpu, lowVram: $lowVram, f16Kv: $f16Kv, logitsAll: $logitsAll, vocabOnly: $vocabOnly, useMmap: $useMmap, useMlock: $useMlock, numThread: $numThread)'; } @override @@ -1309,6 +1404,7 @@ class _$RequestOptionsImpl extends _RequestOptions { other.numPredict == numPredict) && (identical(other.topK, topK) || other.topK == topK) && (identical(other.topP, topP) || other.topP == topP) && + (identical(other.minP, minP) || other.minP == minP) && (identical(other.tfsZ, tfsZ) || other.tfsZ == tfsZ) && (identical(other.typicalP, typicalP) || other.typicalP == typicalP) && @@ -1359,6 +1455,7 @@ class _$RequestOptionsImpl extends _RequestOptions { numPredict, topK, topP, + minP, tfsZ, typicalP, repeatLastN, @@ -1407,6 +1504,7 @@ abstract class _RequestOptions extends RequestOptions { @JsonKey(name: 'num_predict', includeIfNull: false) final int? numPredict, @JsonKey(name: 'top_k', includeIfNull: false) final int? topK, @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @JsonKey(name: 'min_p', includeIfNull: false) final double? minP, @JsonKey(name: 'tfs_z', includeIfNull: false) final double? tfsZ, @JsonKey(name: 'typical_p', includeIfNull: false) final double? typicalP, @JsonKey(name: 'repeat_last_n', includeIfNull: false) @@ -1451,82 +1549,105 @@ abstract class _RequestOptions extends RequestOptions { int? get numKeep; @override - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model + /// generate the same text for the same prompt. (Default: 0) @JsonKey(includeIfNull: false) int? get seed; @override - /// Maximum number of tokens to predict when generating text. 
(Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text. + /// (Default: 128, -1 = infinite generation, -2 = fill context) @JsonKey(name: 'num_predict', includeIfNull: false) int? get numPredict; @override - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + /// while a lower value (e.g. 10) will be more conservative. (Default: 40) @JsonKey(name: 'top_k', includeIfNull: false) int? get topK; @override - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; @override - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + /// probability for a token to be considered, relative to the probability of the most likely token. For + /// example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + /// than 0.05*0.9=0.045 are filtered out. (Default: 0.0) + @JsonKey(name: 'min_p', includeIfNull: false) + double? get minP; + @override + + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) @JsonKey(name: 'tfs_z', includeIfNull: false) double? get tfsZ; @override - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) @JsonKey(name: 'typical_p', includeIfNull: false) double? get typicalP; @override - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition. + /// (Default: 64, 0 = disabled, -1 = num_ctx) @JsonKey(name: 'repeat_last_n', includeIfNull: false) int? get repeatLastN; @override - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively. + /// (Default: 0.8) @JsonKey(includeIfNull: false) double? get temperature; @override - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) @JsonKey(name: 'repeat_penalty', includeIfNull: false) double? 
get repeatPenalty; @override - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the + /// model's likelihood to talk about new topics. (Default: 0) @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; @override - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + /// model's likelihood to repeat the same line verbatim. (Default: 0) @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; @override - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity. + /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) @JsonKey(includeIfNull: false) int? get mirostat; @override - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more + /// focused and coherent text. (Default: 5.0) @JsonKey(name: 'mirostat_tau', includeIfNull: false) double? get mirostatTau; @override - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + /// (Default: 0.1) @JsonKey(name: 'mirostat_eta', includeIfNull: false) double? get mirostatEta; @override - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) @JsonKey(name: 'penalize_newline', includeIfNull: false) bool? get penalizeNewline; @override - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop + /// sequence. @JsonKey(includeIfNull: false) List? get stop; @override @@ -1536,17 +1657,18 @@ abstract class _RequestOptions extends RequestOptions { bool? get numa; @override - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) @JsonKey(name: 'num_ctx', includeIfNull: false) int? get numCtx; @override - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) @JsonKey(name: 'num_batch', includeIfNull: false) int? get numBatch; @override - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s). + /// On macOS it defaults to 1 to enable metal support, 0 to disable. @JsonKey(name: 'num_gpu', includeIfNull: false) int? 
get numGpu; @override @@ -1561,7 +1683,7 @@ abstract class _RequestOptions extends RequestOptions { bool? get lowVram; @override - /// Enable f16 key/value. (Default: false) + /// Enable f16 key/value. (Default: true) @JsonKey(name: 'f16_kv', includeIfNull: false) bool? get f16Kv; @override @@ -1586,7 +1708,9 @@ abstract class _RequestOptions extends RequestOptions { bool? get useMlock; @override - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + /// performance. It is recommended to set this value to the number of physical CPU cores your system has + /// (as opposed to the logical number of cores). @JsonKey(name: 'num_thread', includeIfNull: false) int? get numThread; @override @@ -1595,6 +1719,154 @@ abstract class _RequestOptions extends RequestOptions { throw _privateConstructorUsedError; } +VersionResponse _$VersionResponseFromJson(Map json) { + return _VersionResponse.fromJson(json); +} + +/// @nodoc +mixin _$VersionResponse { + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) + String? get version => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $VersionResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $VersionResponseCopyWith<$Res> { + factory $VersionResponseCopyWith( + VersionResponse value, $Res Function(VersionResponse) then) = + _$VersionResponseCopyWithImpl<$Res, VersionResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) String? version}); +} + +/// @nodoc +class _$VersionResponseCopyWithImpl<$Res, $Val extends VersionResponse> + implements $VersionResponseCopyWith<$Res> { + _$VersionResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? version = freezed, + }) { + return _then(_value.copyWith( + version: freezed == version + ? _value.version + : version // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$VersionResponseImplCopyWith<$Res> + implements $VersionResponseCopyWith<$Res> { + factory _$$VersionResponseImplCopyWith(_$VersionResponseImpl value, + $Res Function(_$VersionResponseImpl) then) = + __$$VersionResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) String? version}); +} + +/// @nodoc +class __$$VersionResponseImplCopyWithImpl<$Res> + extends _$VersionResponseCopyWithImpl<$Res, _$VersionResponseImpl> + implements _$$VersionResponseImplCopyWith<$Res> { + __$$VersionResponseImplCopyWithImpl( + _$VersionResponseImpl _value, $Res Function(_$VersionResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? version = freezed, + }) { + return _then(_$VersionResponseImpl( + version: freezed == version + ? 
_value.version + : version // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$VersionResponseImpl extends _VersionResponse { + const _$VersionResponseImpl({@JsonKey(includeIfNull: false) this.version}) + : super._(); + + factory _$VersionResponseImpl.fromJson(Map json) => + _$$VersionResponseImplFromJson(json); + + /// The version of the Ollama server. + @override + @JsonKey(includeIfNull: false) + final String? version; + + @override + String toString() { + return 'VersionResponse(version: $version)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$VersionResponseImpl && + (identical(other.version, version) || other.version == version)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, version); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => + __$$VersionResponseImplCopyWithImpl<_$VersionResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$VersionResponseImplToJson( + this, + ); + } +} + +abstract class _VersionResponse extends VersionResponse { + const factory _VersionResponse( + {@JsonKey(includeIfNull: false) final String? version}) = + _$VersionResponseImpl; + const _VersionResponse._() : super._(); + + factory _VersionResponse.fromJson(Map json) = + _$VersionResponseImpl.fromJson; + + @override + + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) + String? get version; + @override + @JsonKey(ignore: true) + _$$VersionResponseImplCopyWith<_$VersionResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + GenerateCompletionResponse _$GenerateCompletionResponseFromJson( Map json) { return _GenerateCompletionResponse.fromJson(json); @@ -2130,6 +2402,10 @@ mixin _$GenerateChatCompletionRequest { @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive => throw _privateConstructorUsedError; + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) + List? get tools => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $GenerateChatCompletionRequestCopyWith @@ -2153,7 +2429,8 @@ abstract class $GenerateChatCompletionRequestCopyWith<$Res> { ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + @JsonKey(includeIfNull: false) List? tools}); $RequestOptionsCopyWith<$Res>? get options; } @@ -2178,6 +2455,7 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, + Object? tools = freezed, }) { return _then(_value.copyWith( model: null == model @@ -2204,6 +2482,10 @@ class _$GenerateChatCompletionRequestCopyWithImpl<$Res, ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, + tools: freezed == tools + ? _value.tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } @@ -2238,7 +2520,8 @@ abstract class _$$GenerateChatCompletionRequestImplCopyWith<$Res> ResponseFormat? format, @JsonKey(includeIfNull: false) RequestOptions? options, bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? 
keepAlive}); + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive, + @JsonKey(includeIfNull: false) List? tools}); @override $RequestOptionsCopyWith<$Res>? get options; @@ -2263,6 +2546,7 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> Object? options = freezed, Object? stream = null, Object? keepAlive = freezed, + Object? tools = freezed, }) { return _then(_$GenerateChatCompletionRequestImpl( model: null == model @@ -2289,6 +2573,10 @@ class __$$GenerateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.keepAlive : keepAlive // ignore: cast_nullable_to_non_nullable as int?, + tools: freezed == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -2306,8 +2594,10 @@ class _$GenerateChatCompletionRequestImpl this.format, @JsonKey(includeIfNull: false) this.options, this.stream = false, - @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) + @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive, + @JsonKey(includeIfNull: false) final List? tools}) : _messages = messages, + _tools = tools, super._(); factory _$GenerateChatCompletionRequestImpl.fromJson( @@ -2361,9 +2651,23 @@ class _$GenerateChatCompletionRequestImpl @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive; + /// A list of tools the model may call. + final List? _tools; + + /// A list of tools the model may call. + @override + @JsonKey(includeIfNull: false) + List? get tools { + final value = _tools; + if (value == null) return null; + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive)'; + return 'GenerateChatCompletionRequest(model: $model, messages: $messages, format: $format, options: $options, stream: $stream, keepAlive: $keepAlive, tools: $tools)'; } @override @@ -2377,7 +2681,8 @@ class _$GenerateChatCompletionRequestImpl (identical(other.options, options) || other.options == options) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.keepAlive, keepAlive) || - other.keepAlive == keepAlive)); + other.keepAlive == keepAlive) && + const DeepCollectionEquality().equals(other._tools, _tools)); } @JsonKey(ignore: true) @@ -2389,7 +2694,8 @@ class _$GenerateChatCompletionRequestImpl format, options, stream, - keepAlive); + keepAlive, + const DeepCollectionEquality().hash(_tools)); @JsonKey(ignore: true) @override @@ -2418,8 +2724,9 @@ abstract class _GenerateChatCompletionRequest final ResponseFormat? format, @JsonKey(includeIfNull: false) final RequestOptions? options, final bool stream, - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive}) = _$GenerateChatCompletionRequestImpl; + @JsonKey(name: 'keep_alive', includeIfNull: false) final int? keepAlive, + @JsonKey(includeIfNull: false) + final List? tools}) = _$GenerateChatCompletionRequestImpl; const _GenerateChatCompletionRequest._() : super._(); factory _GenerateChatCompletionRequest.fromJson(Map json) = @@ -2465,6 +2772,11 @@ abstract class _GenerateChatCompletionRequest @JsonKey(name: 'keep_alive', includeIfNull: false) int? get keepAlive; @override + + /// A list of tools the model may call. + @JsonKey(includeIfNull: false) + List? 
get tools; + @override @JsonKey(ignore: true) _$$GenerateChatCompletionRequestImplCopyWith< _$GenerateChatCompletionRequestImpl> @@ -2479,22 +2791,19 @@ GenerateChatCompletionResponse _$GenerateChatCompletionResponseFromJson( /// @nodoc mixin _$GenerateChatCompletionResponse { /// A message in the chat endpoint - @JsonKey(includeIfNull: false) - Message? get message => throw _privateConstructorUsedError; + Message get message => throw _privateConstructorUsedError; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; + String get model => throw _privateConstructorUsedError; /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) - String? get createdAt => throw _privateConstructorUsedError; + @JsonKey(name: 'created_at') + String get createdAt => throw _privateConstructorUsedError; /// Whether the response has completed. - @JsonKey(includeIfNull: false) - bool? get done => throw _privateConstructorUsedError; + bool get done => throw _privateConstructorUsedError; /// Reason why the model is done generating a response. @JsonKey( @@ -2542,10 +2851,10 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { GenerateChatCompletionResponse>; @useResult $Res call( - {@JsonKey(includeIfNull: false) Message? message, - @JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, - @JsonKey(includeIfNull: false) bool? done, + {Message message, + String model, + @JsonKey(name: 'created_at') String createdAt, + bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2560,7 +2869,7 @@ abstract class $GenerateChatCompletionResponseCopyWith<$Res> { @JsonKey(name: 'eval_count', includeIfNull: false) int? evalCount, @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); - $MessageCopyWith<$Res>? get message; + $MessageCopyWith<$Res> get message; } /// @nodoc @@ -2577,10 +2886,10 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ - Object? message = freezed, - Object? model = freezed, - Object? createdAt = freezed, - Object? done = freezed, + Object? message = null, + Object? model = null, + Object? createdAt = null, + Object? done = null, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2590,22 +2899,22 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, Object? evalDuration = freezed, }) { return _then(_value.copyWith( - message: freezed == message + message: null == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message?, - model: freezed == model + as Message, + model: null == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: freezed == createdAt + as String, + createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String?, - done: freezed == done + as String, + done: null == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool?, + as bool, doneReason: freezed == doneReason ? 
_value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2639,12 +2948,8 @@ class _$GenerateChatCompletionResponseCopyWithImpl<$Res, @override @pragma('vm:prefer-inline') - $MessageCopyWith<$Res>? get message { - if (_value.message == null) { - return null; - } - - return $MessageCopyWith<$Res>(_value.message!, (value) { + $MessageCopyWith<$Res> get message { + return $MessageCopyWith<$Res>(_value.message, (value) { return _then(_value.copyWith(message: value) as $Val); }); } @@ -2660,10 +2965,10 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey(includeIfNull: false) Message? message, - @JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'created_at', includeIfNull: false) String? createdAt, - @JsonKey(includeIfNull: false) bool? done, + {Message message, + String model, + @JsonKey(name: 'created_at') String createdAt, + bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2679,7 +2984,7 @@ abstract class _$$GenerateChatCompletionResponseImplCopyWith<$Res> @JsonKey(name: 'eval_duration', includeIfNull: false) int? evalDuration}); @override - $MessageCopyWith<$Res>? get message; + $MessageCopyWith<$Res> get message; } /// @nodoc @@ -2695,10 +3000,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> @pragma('vm:prefer-inline') @override $Res call({ - Object? message = freezed, - Object? model = freezed, - Object? createdAt = freezed, - Object? done = freezed, + Object? message = null, + Object? model = null, + Object? createdAt = null, + Object? done = null, Object? doneReason = freezed, Object? totalDuration = freezed, Object? loadDuration = freezed, @@ -2708,22 +3013,22 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> Object? evalDuration = freezed, }) { return _then(_$GenerateChatCompletionResponseImpl( - message: freezed == message + message: null == message ? _value.message : message // ignore: cast_nullable_to_non_nullable - as Message?, - model: freezed == model + as Message, + model: null == model ? _value.model : model // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: freezed == createdAt + as String, + createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable - as String?, - done: freezed == done + as String, + done: null == done ? _value.done : done // ignore: cast_nullable_to_non_nullable - as bool?, + as bool, doneReason: freezed == doneReason ? _value.doneReason : doneReason // ignore: cast_nullable_to_non_nullable @@ -2761,10 +3066,10 @@ class __$$GenerateChatCompletionResponseImplCopyWithImpl<$Res> class _$GenerateChatCompletionResponseImpl extends _GenerateChatCompletionResponse { const _$GenerateChatCompletionResponseImpl( - {@JsonKey(includeIfNull: false) this.message, - @JsonKey(includeIfNull: false) this.model, - @JsonKey(name: 'created_at', includeIfNull: false) this.createdAt, - @JsonKey(includeIfNull: false) this.done, + {required this.message, + required this.model, + @JsonKey(name: 'created_at') required this.createdAt, + required this.done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2786,25 +3091,22 @@ class _$GenerateChatCompletionResponseImpl /// A message in the chat endpoint @override - @JsonKey(includeIfNull: false) - final Message? message; + final Message message; /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. @override - @JsonKey(includeIfNull: false) - final String? model; + final String model; /// Date on which a model was created. @override - @JsonKey(name: 'created_at', includeIfNull: false) - final String? createdAt; + @JsonKey(name: 'created_at') + final String createdAt; /// Whether the response has completed. @override - @JsonKey(includeIfNull: false) - final bool? done; + final bool done; /// Reason why the model is done generating a response. @override @@ -2910,11 +3212,10 @@ class _$GenerateChatCompletionResponseImpl abstract class _GenerateChatCompletionResponse extends GenerateChatCompletionResponse { const factory _GenerateChatCompletionResponse( - {@JsonKey(includeIfNull: false) final Message? message, - @JsonKey(includeIfNull: false) final String? model, - @JsonKey(name: 'created_at', includeIfNull: false) - final String? createdAt, - @JsonKey(includeIfNull: false) final bool? done, + {required final Message message, + required final String model, + @JsonKey(name: 'created_at') required final String createdAt, + required final bool done, @JsonKey( name: 'done_reason', includeIfNull: false, @@ -2939,25 +3240,22 @@ abstract class _GenerateChatCompletionResponse @override /// A message in the chat endpoint - @JsonKey(includeIfNull: false) - Message? get message; + Message get message; @override /// The model name. /// /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model; + String get model; @override /// Date on which a model was created. - @JsonKey(name: 'created_at', includeIfNull: false) - String? get createdAt; + @JsonKey(name: 'created_at') + String get createdAt; @override /// Whether the response has completed. - @JsonKey(includeIfNull: false) - bool? get done; + bool get done; @override /// Reason why the model is done generating a response. @@ -3019,6 +3317,10 @@ mixin _$Message { @JsonKey(includeIfNull: false) List? get images => throw _privateConstructorUsedError; + /// A list of tools the model wants to call. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls => throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $MessageCopyWith get copyWith => throw _privateConstructorUsedError; @@ -3032,7 +3334,9 @@ abstract class $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images}); + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc @@ -3051,6 +3355,7 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> Object? role = null, Object? content = null, Object? images = freezed, + Object? toolCalls = freezed, }) { return _then(_value.copyWith( role: null == role @@ -3065,6 +3370,10 @@ class _$MessageCopyWithImpl<$Res, $Val extends Message> ? _value.images : images // ignore: cast_nullable_to_non_nullable as List?, + toolCalls: freezed == toolCalls + ? 
_value.toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -3079,7 +3388,9 @@ abstract class _$$MessageImplCopyWith<$Res> implements $MessageCopyWith<$Res> { $Res call( {MessageRole role, String content, - @JsonKey(includeIfNull: false) List? images}); + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls}); } /// @nodoc @@ -3096,6 +3407,7 @@ class __$$MessageImplCopyWithImpl<$Res> Object? role = null, Object? content = null, Object? images = freezed, + Object? toolCalls = freezed, }) { return _then(_$MessageImpl( role: null == role @@ -3110,6 +3422,10 @@ class __$$MessageImplCopyWithImpl<$Res> ? _value._images : images // ignore: cast_nullable_to_non_nullable as List?, + toolCalls: freezed == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -3120,8 +3436,11 @@ class _$MessageImpl extends _Message { const _$MessageImpl( {required this.role, required this.content, - @JsonKey(includeIfNull: false) final List? images}) + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) : _images = images, + _toolCalls = toolCalls, super._(); factory _$MessageImpl.fromJson(Map json) => @@ -3149,9 +3468,23 @@ class _$MessageImpl extends _Message { return EqualUnmodifiableListView(value); } + /// A list of tools the model wants to call. + final List? _toolCalls; + + /// A list of tools the model wants to call. + @override + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'Message(role: $role, content: $content, images: $images)'; + return 'Message(role: $role, content: $content, images: $images, toolCalls: $toolCalls)'; } @override @@ -3161,13 +3494,19 @@ class _$MessageImpl extends _Message { other is _$MessageImpl && (identical(other.role, role) || other.role == role) && (identical(other.content, content) || other.content == content) && - const DeepCollectionEquality().equals(other._images, _images)); + const DeepCollectionEquality().equals(other._images, _images) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, role, content, const DeepCollectionEquality().hash(_images)); + runtimeType, + role, + content, + const DeepCollectionEquality().hash(_images), + const DeepCollectionEquality().hash(_toolCalls)); @JsonKey(ignore: true) @override @@ -3185,10 +3524,11 @@ class _$MessageImpl extends _Message { abstract class _Message extends Message { const factory _Message( - {required final MessageRole role, - required final String content, - @JsonKey(includeIfNull: false) final List? images}) = - _$MessageImpl; + {required final MessageRole role, + required final String content, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) = _$MessageImpl; const _Message._() : super._(); factory _Message.fromJson(Map json) = _$MessageImpl.fromJson; @@ -3207,65 +3547,49 @@ abstract class _Message extends Message { @JsonKey(includeIfNull: false) List? get images; @override + + /// A list of tools the model wants to call. 
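// Editor's note: illustrative sketch only, not part of the generated diff above.
// It shows the new `minP` sampling option (together with a couple of the other
// documented RequestOptions fields) being passed with a chat request. The option
// and field names come from the generated classes in this diff; the OllamaClient
// call, the model tag, and the prompt are assumptions used purely for illustration.
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  final client = OllamaClient();

  final res = await client.generateChatCompletion(
    request: GenerateChatCompletionRequest(
      model: 'llama3.1', // placeholder model tag
      messages: const [
        Message(role: MessageRole.user, content: 'Tell me a short story.'),
      ],
      options: const RequestOptions(
        temperature: 0.8,
        // Keep only tokens whose probability is at least 5% of the top token's.
        minP: 0.05,
        numCtx: 4096,
      ),
    ),
  );

  print(res.message.content);
}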
+ @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; + @override @JsonKey(ignore: true) _$$MessageImplCopyWith<_$MessageImpl> get copyWith => throw _privateConstructorUsedError; } -GenerateEmbeddingRequest _$GenerateEmbeddingRequestFromJson( - Map json) { - return _GenerateEmbeddingRequest.fromJson(json); +Tool _$ToolFromJson(Map json) { + return _Tool.fromJson(json); } /// @nodoc -mixin _$GenerateEmbeddingRequest { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model => throw _privateConstructorUsedError; - - /// Text to generate embeddings for. - String get prompt => throw _privateConstructorUsedError; +mixin _$Tool { + /// The type of tool. + ToolType get type => throw _privateConstructorUsedError; - /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. + /// A function that the model may call. @JsonKey(includeIfNull: false) - RequestOptions? get options => throw _privateConstructorUsedError; - - /// How long (in minutes) to keep the model loaded in memory. - /// - /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. - /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. - /// - If set to 0, the model will be unloaded immediately once finished. - /// - If not set, the model will stay loaded for 5 minutes by default - @JsonKey(name: 'keep_alive', includeIfNull: false) - int? get keepAlive => throw _privateConstructorUsedError; + ToolFunction? get function => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $GenerateEmbeddingRequestCopyWith get copyWith => - throw _privateConstructorUsedError; + $ToolCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $GenerateEmbeddingRequestCopyWith<$Res> { - factory $GenerateEmbeddingRequestCopyWith(GenerateEmbeddingRequest value, - $Res Function(GenerateEmbeddingRequest) then) = - _$GenerateEmbeddingRequestCopyWithImpl<$Res, GenerateEmbeddingRequest>; +abstract class $ToolCopyWith<$Res> { + factory $ToolCopyWith(Tool value, $Res Function(Tool) then) = + _$ToolCopyWithImpl<$Res, Tool>; @useResult $Res call( - {String model, - String prompt, - @JsonKey(includeIfNull: false) RequestOptions? options, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); - $RequestOptionsCopyWith<$Res>? get options; + $ToolFunctionCopyWith<$Res>? get function; } /// @nodoc -class _$GenerateEmbeddingRequestCopyWithImpl<$Res, - $Val extends GenerateEmbeddingRequest> - implements $GenerateEmbeddingRequestCopyWith<$Res> { - _$GenerateEmbeddingRequestCopyWithImpl(this._value, this._then); +class _$ToolCopyWithImpl<$Res, $Val extends Tool> + implements $ToolCopyWith<$Res> { + _$ToolCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3275,251 +3599,186 @@ class _$GenerateEmbeddingRequestCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ - Object? model = null, - Object? prompt = null, - Object? options = freezed, - Object? keepAlive = freezed, + Object? type = null, + Object? function = freezed, }) { return _then(_value.copyWith( - model: null == model - ? 
_value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - prompt: null == prompt - ? _value.prompt - : prompt // ignore: cast_nullable_to_non_nullable - as String, - options: freezed == options - ? _value.options - : options // ignore: cast_nullable_to_non_nullable - as RequestOptions?, - keepAlive: freezed == keepAlive - ? _value.keepAlive - : keepAlive // ignore: cast_nullable_to_non_nullable - as int?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolType, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolFunction?, ) as $Val); } @override @pragma('vm:prefer-inline') - $RequestOptionsCopyWith<$Res>? get options { - if (_value.options == null) { + $ToolFunctionCopyWith<$Res>? get function { + if (_value.function == null) { return null; } - return $RequestOptionsCopyWith<$Res>(_value.options!, (value) { - return _then(_value.copyWith(options: value) as $Val); + return $ToolFunctionCopyWith<$Res>(_value.function!, (value) { + return _then(_value.copyWith(function: value) as $Val); }); } } /// @nodoc -abstract class _$$GenerateEmbeddingRequestImplCopyWith<$Res> - implements $GenerateEmbeddingRequestCopyWith<$Res> { - factory _$$GenerateEmbeddingRequestImplCopyWith( - _$GenerateEmbeddingRequestImpl value, - $Res Function(_$GenerateEmbeddingRequestImpl) then) = - __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res>; +abstract class _$$ToolImplCopyWith<$Res> implements $ToolCopyWith<$Res> { + factory _$$ToolImplCopyWith( + _$ToolImpl value, $Res Function(_$ToolImpl) then) = + __$$ToolImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String model, - String prompt, - @JsonKey(includeIfNull: false) RequestOptions? options, - @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + {ToolType type, @JsonKey(includeIfNull: false) ToolFunction? function}); @override - $RequestOptionsCopyWith<$Res>? get options; + $ToolFunctionCopyWith<$Res>? get function; } /// @nodoc -class __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res> - extends _$GenerateEmbeddingRequestCopyWithImpl<$Res, - _$GenerateEmbeddingRequestImpl> - implements _$$GenerateEmbeddingRequestImplCopyWith<$Res> { - __$$GenerateEmbeddingRequestImplCopyWithImpl( - _$GenerateEmbeddingRequestImpl _value, - $Res Function(_$GenerateEmbeddingRequestImpl) _then) +class __$$ToolImplCopyWithImpl<$Res> + extends _$ToolCopyWithImpl<$Res, _$ToolImpl> + implements _$$ToolImplCopyWith<$Res> { + __$$ToolImplCopyWithImpl(_$ToolImpl _value, $Res Function(_$ToolImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? model = null, - Object? prompt = null, - Object? options = freezed, - Object? keepAlive = freezed, + Object? type = null, + Object? function = freezed, }) { - return _then(_$GenerateEmbeddingRequestImpl( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - prompt: null == prompt - ? _value.prompt - : prompt // ignore: cast_nullable_to_non_nullable - as String, - options: freezed == options - ? _value.options - : options // ignore: cast_nullable_to_non_nullable - as RequestOptions?, - keepAlive: freezed == keepAlive - ? _value.keepAlive - : keepAlive // ignore: cast_nullable_to_non_nullable - as int?, + return _then(_$ToolImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ToolType, + function: freezed == function + ? 
_value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolFunction?, )); } } /// @nodoc @JsonSerializable() -class _$GenerateEmbeddingRequestImpl extends _GenerateEmbeddingRequest { - const _$GenerateEmbeddingRequestImpl( - {required this.model, - required this.prompt, - @JsonKey(includeIfNull: false) this.options, - @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) +class _$ToolImpl extends _Tool { + const _$ToolImpl( + {this.type = ToolType.function, + @JsonKey(includeIfNull: false) this.function}) : super._(); - factory _$GenerateEmbeddingRequestImpl.fromJson(Map json) => - _$$GenerateEmbeddingRequestImplFromJson(json); - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @override - final String model; + factory _$ToolImpl.fromJson(Map json) => + _$$ToolImplFromJson(json); - /// Text to generate embeddings for. + /// The type of tool. @override - final String prompt; + @JsonKey() + final ToolType type; - /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. + /// A function that the model may call. @override @JsonKey(includeIfNull: false) - final RequestOptions? options; - - /// How long (in minutes) to keep the model loaded in memory. - /// - /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. - /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. - /// - If set to 0, the model will be unloaded immediately once finished. - /// - If not set, the model will stay loaded for 5 minutes by default - @override - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive; + final ToolFunction? 
function; @override String toString() { - return 'GenerateEmbeddingRequest(model: $model, prompt: $prompt, options: $options, keepAlive: $keepAlive)'; + return 'Tool(type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$GenerateEmbeddingRequestImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.prompt, prompt) || other.prompt == prompt) && - (identical(other.options, options) || other.options == options) && - (identical(other.keepAlive, keepAlive) || - other.keepAlive == keepAlive)); + other is _$ToolImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.function, function) || + other.function == function)); } @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, model, prompt, options, keepAlive); + int get hashCode => Object.hash(runtimeType, type, function); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> - get copyWith => __$$GenerateEmbeddingRequestImplCopyWithImpl< - _$GenerateEmbeddingRequestImpl>(this, _$identity); + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + __$$ToolImplCopyWithImpl<_$ToolImpl>(this, _$identity); @override Map toJson() { - return _$$GenerateEmbeddingRequestImplToJson( + return _$$ToolImplToJson( this, ); } } -abstract class _GenerateEmbeddingRequest extends GenerateEmbeddingRequest { - const factory _GenerateEmbeddingRequest( - {required final String model, - required final String prompt, - @JsonKey(includeIfNull: false) final RequestOptions? options, - @JsonKey(name: 'keep_alive', includeIfNull: false) - final int? keepAlive}) = _$GenerateEmbeddingRequestImpl; - const _GenerateEmbeddingRequest._() : super._(); - - factory _GenerateEmbeddingRequest.fromJson(Map json) = - _$GenerateEmbeddingRequestImpl.fromJson; +abstract class _Tool extends Tool { + const factory _Tool( + {final ToolType type, + @JsonKey(includeIfNull: false) final ToolFunction? function}) = + _$ToolImpl; + const _Tool._() : super._(); - @override + factory _Tool.fromJson(Map json) = _$ToolImpl.fromJson; - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model; @override - /// Text to generate embeddings for. - String get prompt; + /// The type of tool. + ToolType get type; @override - /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. + /// A function that the model may call. @JsonKey(includeIfNull: false) - RequestOptions? get options; - @override - - /// How long (in minutes) to keep the model loaded in memory. - /// - /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. - /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. - /// - If set to 0, the model will be unloaded immediately once finished. - /// - If not set, the model will stay loaded for 5 minutes by default - @JsonKey(name: 'keep_alive', includeIfNull: false) - int? get keepAlive; + ToolFunction? 
get function; @override @JsonKey(ignore: true) - _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> - get copyWith => throw _privateConstructorUsedError; + _$$ToolImplCopyWith<_$ToolImpl> get copyWith => + throw _privateConstructorUsedError; } -GenerateEmbeddingResponse _$GenerateEmbeddingResponseFromJson( - Map json) { - return _GenerateEmbeddingResponse.fromJson(json); +ToolFunction _$ToolFunctionFromJson(Map json) { + return _ToolFunction.fromJson(json); } /// @nodoc -mixin _$GenerateEmbeddingResponse { - /// The embedding for the prompt. - @JsonKey(includeIfNull: false) - List? get embedding => throw _privateConstructorUsedError; +mixin _$ToolFunction { + /// The name of the function to be called. + String get name => throw _privateConstructorUsedError; + + /// A description of what the function does, used by the model to choose when and how to call the function. + String get description => throw _privateConstructorUsedError; + + /// The parameters the functions accepts, described as a JSON Schema object. + Map get parameters => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $GenerateEmbeddingResponseCopyWith get copyWith => + $ToolFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $GenerateEmbeddingResponseCopyWith<$Res> { - factory $GenerateEmbeddingResponseCopyWith(GenerateEmbeddingResponse value, - $Res Function(GenerateEmbeddingResponse) then) = - _$GenerateEmbeddingResponseCopyWithImpl<$Res, GenerateEmbeddingResponse>; +abstract class $ToolFunctionCopyWith<$Res> { + factory $ToolFunctionCopyWith( + ToolFunction value, $Res Function(ToolFunction) then) = + _$ToolFunctionCopyWithImpl<$Res, ToolFunction>; @useResult - $Res call({@JsonKey(includeIfNull: false) List? embedding}); + $Res call({String name, String description, Map parameters}); } /// @nodoc -class _$GenerateEmbeddingResponseCopyWithImpl<$Res, - $Val extends GenerateEmbeddingResponse> - implements $GenerateEmbeddingResponseCopyWith<$Res> { - _$GenerateEmbeddingResponseCopyWithImpl(this._value, this._then); +class _$ToolFunctionCopyWithImpl<$Res, $Val extends ToolFunction> + implements $ToolFunctionCopyWith<$Res> { + _$ToolFunctionCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3529,181 +3788,196 @@ class _$GenerateEmbeddingResponseCopyWithImpl<$Res, @pragma('vm:prefer-inline') @override $Res call({ - Object? embedding = freezed, + Object? name = null, + Object? description = null, + Object? parameters = null, }) { return _then(_value.copyWith( - embedding: freezed == embedding - ? _value.embedding - : embedding // ignore: cast_nullable_to_non_nullable - as List?, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: null == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String, + parameters: null == parameters + ? 
_value.parameters + : parameters // ignore: cast_nullable_to_non_nullable + as Map, ) as $Val); } } /// @nodoc -abstract class _$$GenerateEmbeddingResponseImplCopyWith<$Res> - implements $GenerateEmbeddingResponseCopyWith<$Res> { - factory _$$GenerateEmbeddingResponseImplCopyWith( - _$GenerateEmbeddingResponseImpl value, - $Res Function(_$GenerateEmbeddingResponseImpl) then) = - __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res>; +abstract class _$$ToolFunctionImplCopyWith<$Res> + implements $ToolFunctionCopyWith<$Res> { + factory _$$ToolFunctionImplCopyWith( + _$ToolFunctionImpl value, $Res Function(_$ToolFunctionImpl) then) = + __$$ToolFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(includeIfNull: false) List? embedding}); + $Res call({String name, String description, Map parameters}); } /// @nodoc -class __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res> - extends _$GenerateEmbeddingResponseCopyWithImpl<$Res, - _$GenerateEmbeddingResponseImpl> - implements _$$GenerateEmbeddingResponseImplCopyWith<$Res> { - __$$GenerateEmbeddingResponseImplCopyWithImpl( - _$GenerateEmbeddingResponseImpl _value, - $Res Function(_$GenerateEmbeddingResponseImpl) _then) +class __$$ToolFunctionImplCopyWithImpl<$Res> + extends _$ToolFunctionCopyWithImpl<$Res, _$ToolFunctionImpl> + implements _$$ToolFunctionImplCopyWith<$Res> { + __$$ToolFunctionImplCopyWithImpl( + _$ToolFunctionImpl _value, $Res Function(_$ToolFunctionImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? embedding = freezed, + Object? name = null, + Object? description = null, + Object? parameters = null, }) { - return _then(_$GenerateEmbeddingResponseImpl( - embedding: freezed == embedding - ? _value._embedding - : embedding // ignore: cast_nullable_to_non_nullable - as List?, + return _then(_$ToolFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: null == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String, + parameters: null == parameters + ? _value._parameters + : parameters // ignore: cast_nullable_to_non_nullable + as Map, )); } } /// @nodoc @JsonSerializable() -class _$GenerateEmbeddingResponseImpl extends _GenerateEmbeddingResponse { - const _$GenerateEmbeddingResponseImpl( - {@JsonKey(includeIfNull: false) final List? embedding}) - : _embedding = embedding, +class _$ToolFunctionImpl extends _ToolFunction { + const _$ToolFunctionImpl( + {required this.name, + required this.description, + required final Map parameters}) + : _parameters = parameters, super._(); - factory _$GenerateEmbeddingResponseImpl.fromJson(Map json) => - _$$GenerateEmbeddingResponseImplFromJson(json); + factory _$ToolFunctionImpl.fromJson(Map json) => + _$$ToolFunctionImplFromJson(json); - /// The embedding for the prompt. - final List? _embedding; + /// The name of the function to be called. + @override + final String name; - /// The embedding for the prompt. + /// A description of what the function does, used by the model to choose when and how to call the function. @override - @JsonKey(includeIfNull: false) - List? get embedding { - final value = _embedding; - if (value == null) return null; - if (_embedding is EqualUnmodifiableListView) return _embedding; + final String description; + + /// The parameters the functions accepts, described as a JSON Schema object. 
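// Editor's note: illustrative sketch only, not part of the generated diff above.
// It exercises the tool-calling types introduced in this diff (Tool, ToolFunction,
// ToolCall, ToolCallFunction, and the new `tools` / `toolCalls` fields). The
// OllamaClient call, the model tag, and the weather function are assumptions
// used purely for illustration; the field names come from the generated classes.
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  final client = OllamaClient();

  // Describe a callable function; `parameters` is a JSON Schema object.
  const weatherTool = Tool(
    function: ToolFunction(
      name: 'get_current_weather',
      description: 'Get the current weather for a given city',
      parameters: {
        'type': 'object',
        'properties': {
          'city': {'type': 'string', 'description': 'Name of the city'},
        },
        'required': ['city'],
      },
    ),
  );

  final res = await client.generateChatCompletion(
    request: GenerateChatCompletionRequest(
      model: 'llama3.1', // placeholder model tag
      messages: const [
        Message(
          role: MessageRole.user,
          content: 'What is the weather in Barcelona?',
        ),
      ],
      tools: const [weatherTool],
    ),
  );

  // The new `toolCalls` field on Message carries the calls the model wants made.
  for (final call in res.message.toolCalls ?? const <ToolCall>[]) {
    final fn = call.function;
    if (fn != null) {
      print('Model wants to call ${fn.name} with ${fn.arguments}');
    }
  }
}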
+ final Map _parameters; + + /// The parameters the functions accepts, described as a JSON Schema object. + @override + Map get parameters { + if (_parameters is EqualUnmodifiableMapView) return _parameters; // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); + return EqualUnmodifiableMapView(_parameters); } @override String toString() { - return 'GenerateEmbeddingResponse(embedding: $embedding)'; + return 'ToolFunction(name: $name, description: $description, parameters: $parameters)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$GenerateEmbeddingResponseImpl && + other is _$ToolFunctionImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && const DeepCollectionEquality() - .equals(other._embedding, _embedding)); + .equals(other._parameters, _parameters)); } @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_embedding)); + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_parameters)); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> - get copyWith => __$$GenerateEmbeddingResponseImplCopyWithImpl< - _$GenerateEmbeddingResponseImpl>(this, _$identity); + _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => + __$$ToolFunctionImplCopyWithImpl<_$ToolFunctionImpl>(this, _$identity); @override Map toJson() { - return _$$GenerateEmbeddingResponseImplToJson( + return _$$ToolFunctionImplToJson( this, ); } } -abstract class _GenerateEmbeddingResponse extends GenerateEmbeddingResponse { - const factory _GenerateEmbeddingResponse( - {@JsonKey(includeIfNull: false) final List? embedding}) = - _$GenerateEmbeddingResponseImpl; - const _GenerateEmbeddingResponse._() : super._(); +abstract class _ToolFunction extends ToolFunction { + const factory _ToolFunction( + {required final String name, + required final String description, + required final Map parameters}) = _$ToolFunctionImpl; + const _ToolFunction._() : super._(); - factory _GenerateEmbeddingResponse.fromJson(Map json) = - _$GenerateEmbeddingResponseImpl.fromJson; + factory _ToolFunction.fromJson(Map json) = + _$ToolFunctionImpl.fromJson; @override - /// The embedding for the prompt. - @JsonKey(includeIfNull: false) - List? get embedding; + /// The name of the function to be called. + String get name; + @override + + /// A description of what the function does, used by the model to choose when and how to call the function. + String get description; + @override + + /// The parameters the functions accepts, described as a JSON Schema object. + Map get parameters; @override @JsonKey(ignore: true) - _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> - get copyWith => throw _privateConstructorUsedError; + _$$ToolFunctionImplCopyWith<_$ToolFunctionImpl> get copyWith => + throw _privateConstructorUsedError; } -CreateModelRequest _$CreateModelRequestFromJson(Map json) { - return _CreateModelRequest.fromJson(json); +ToolCall _$ToolCallFromJson(Map json) { + return _ToolCall.fromJson(json); } /// @nodoc -mixin _$CreateModelRequest { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. 
The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model => throw _privateConstructorUsedError; - - /// The contents of the Modelfile. - String get modelfile => throw _privateConstructorUsedError; - - /// Path to the Modelfile (optional) - @JsonKey(includeIfNull: false) - String? get path => throw _privateConstructorUsedError; - - /// The quantization level of the model. +mixin _$ToolCall { + /// The function the model wants to call. @JsonKey(includeIfNull: false) - String? get quantize => throw _privateConstructorUsedError; - - /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. - bool get stream => throw _privateConstructorUsedError; + ToolCallFunction? get function => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $CreateModelRequestCopyWith get copyWith => + $ToolCallCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateModelRequestCopyWith<$Res> { - factory $CreateModelRequestCopyWith( - CreateModelRequest value, $Res Function(CreateModelRequest) then) = - _$CreateModelRequestCopyWithImpl<$Res, CreateModelRequest>; +abstract class $ToolCallCopyWith<$Res> { + factory $ToolCallCopyWith(ToolCall value, $Res Function(ToolCall) then) = + _$ToolCallCopyWithImpl<$Res, ToolCall>; @useResult - $Res call( - {String model, - String modelfile, - @JsonKey(includeIfNull: false) String? path, - @JsonKey(includeIfNull: false) String? quantize, - bool stream}); + $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); + + $ToolCallFunctionCopyWith<$Res>? get function; } /// @nodoc -class _$CreateModelRequestCopyWithImpl<$Res, $Val extends CreateModelRequest> - implements $CreateModelRequestCopyWith<$Res> { - _$CreateModelRequestCopyWithImpl(this._value, this._then); +class _$ToolCallCopyWithImpl<$Res, $Val extends ToolCall> + implements $ToolCallCopyWith<$Res> { + _$ToolCallCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3713,249 +3987,162 @@ class _$CreateModelRequestCopyWithImpl<$Res, $Val extends CreateModelRequest> @pragma('vm:prefer-inline') @override $Res call({ - Object? model = null, - Object? modelfile = null, - Object? path = freezed, - Object? quantize = freezed, - Object? stream = null, + Object? function = freezed, }) { return _then(_value.copyWith( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - modelfile: null == modelfile - ? _value.modelfile - : modelfile // ignore: cast_nullable_to_non_nullable - as String, - path: freezed == path - ? _value.path - : path // ignore: cast_nullable_to_non_nullable - as String?, - quantize: freezed == quantize - ? _value.quantize - : quantize // ignore: cast_nullable_to_non_nullable - as String?, - stream: null == stream - ? _value.stream - : stream // ignore: cast_nullable_to_non_nullable - as bool, + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolCallFunction?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ToolCallFunctionCopyWith<$Res>? 
get function { + if (_value.function == null) { + return null; + } + + return $ToolCallFunctionCopyWith<$Res>(_value.function!, (value) { + return _then(_value.copyWith(function: value) as $Val); + }); + } } /// @nodoc -abstract class _$$CreateModelRequestImplCopyWith<$Res> - implements $CreateModelRequestCopyWith<$Res> { - factory _$$CreateModelRequestImplCopyWith(_$CreateModelRequestImpl value, - $Res Function(_$CreateModelRequestImpl) then) = - __$$CreateModelRequestImplCopyWithImpl<$Res>; +abstract class _$$ToolCallImplCopyWith<$Res> + implements $ToolCallCopyWith<$Res> { + factory _$$ToolCallImplCopyWith( + _$ToolCallImpl value, $Res Function(_$ToolCallImpl) then) = + __$$ToolCallImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {String model, - String modelfile, - @JsonKey(includeIfNull: false) String? path, - @JsonKey(includeIfNull: false) String? quantize, - bool stream}); + $Res call({@JsonKey(includeIfNull: false) ToolCallFunction? function}); + + @override + $ToolCallFunctionCopyWith<$Res>? get function; } /// @nodoc -class __$$CreateModelRequestImplCopyWithImpl<$Res> - extends _$CreateModelRequestCopyWithImpl<$Res, _$CreateModelRequestImpl> - implements _$$CreateModelRequestImplCopyWith<$Res> { - __$$CreateModelRequestImplCopyWithImpl(_$CreateModelRequestImpl _value, - $Res Function(_$CreateModelRequestImpl) _then) +class __$$ToolCallImplCopyWithImpl<$Res> + extends _$ToolCallCopyWithImpl<$Res, _$ToolCallImpl> + implements _$$ToolCallImplCopyWith<$Res> { + __$$ToolCallImplCopyWithImpl( + _$ToolCallImpl _value, $Res Function(_$ToolCallImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? model = null, - Object? modelfile = null, - Object? path = freezed, - Object? quantize = freezed, - Object? stream = null, + Object? function = freezed, }) { - return _then(_$CreateModelRequestImpl( - model: null == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String, - modelfile: null == modelfile - ? _value.modelfile - : modelfile // ignore: cast_nullable_to_non_nullable - as String, - path: freezed == path - ? _value.path - : path // ignore: cast_nullable_to_non_nullable - as String?, - quantize: freezed == quantize - ? _value.quantize - : quantize // ignore: cast_nullable_to_non_nullable - as String?, - stream: null == stream - ? _value.stream - : stream // ignore: cast_nullable_to_non_nullable - as bool, + return _then(_$ToolCallImpl( + function: freezed == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as ToolCallFunction?, )); } } /// @nodoc @JsonSerializable() -class _$CreateModelRequestImpl extends _CreateModelRequest { - const _$CreateModelRequestImpl( - {required this.model, - required this.modelfile, - @JsonKey(includeIfNull: false) this.path, - @JsonKey(includeIfNull: false) this.quantize, - this.stream = false}) +class _$ToolCallImpl extends _ToolCall { + const _$ToolCallImpl({@JsonKey(includeIfNull: false) this.function}) : super._(); - factory _$CreateModelRequestImpl.fromJson(Map json) => - _$$CreateModelRequestImplFromJson(json); - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @override - final String model; - - /// The contents of the Modelfile. 
- @override - final String modelfile; - - /// Path to the Modelfile (optional) - @override - @JsonKey(includeIfNull: false) - final String? path; + factory _$ToolCallImpl.fromJson(Map json) => + _$$ToolCallImplFromJson(json); - /// The quantization level of the model. + /// The function the model wants to call. @override @JsonKey(includeIfNull: false) - final String? quantize; - - /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. - @override - @JsonKey() - final bool stream; + final ToolCallFunction? function; @override String toString() { - return 'CreateModelRequest(model: $model, modelfile: $modelfile, path: $path, quantize: $quantize, stream: $stream)'; + return 'ToolCall(function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateModelRequestImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.modelfile, modelfile) || - other.modelfile == modelfile) && - (identical(other.path, path) || other.path == path) && - (identical(other.quantize, quantize) || - other.quantize == quantize) && - (identical(other.stream, stream) || other.stream == stream)); + other is _$ToolCallImpl && + (identical(other.function, function) || + other.function == function)); } @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, model, modelfile, path, quantize, stream); + int get hashCode => Object.hash(runtimeType, function); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => - __$$CreateModelRequestImplCopyWithImpl<_$CreateModelRequestImpl>( - this, _$identity); + _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => + __$$ToolCallImplCopyWithImpl<_$ToolCallImpl>(this, _$identity); @override Map toJson() { - return _$$CreateModelRequestImplToJson( + return _$$ToolCallImplToJson( this, ); } } -abstract class _CreateModelRequest extends CreateModelRequest { - const factory _CreateModelRequest( - {required final String model, - required final String modelfile, - @JsonKey(includeIfNull: false) final String? path, - @JsonKey(includeIfNull: false) final String? quantize, - final bool stream}) = _$CreateModelRequestImpl; - const _CreateModelRequest._() : super._(); - - factory _CreateModelRequest.fromJson(Map json) = - _$CreateModelRequestImpl.fromJson; - - @override - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - String get model; - @override +abstract class _ToolCall extends ToolCall { + const factory _ToolCall( + {@JsonKey(includeIfNull: false) final ToolCallFunction? function}) = + _$ToolCallImpl; + const _ToolCall._() : super._(); - /// The contents of the Modelfile. - String get modelfile; - @override + factory _ToolCall.fromJson(Map json) = + _$ToolCallImpl.fromJson; - /// Path to the Modelfile (optional) - @JsonKey(includeIfNull: false) - String? get path; @override - /// The quantization level of the model. + /// The function the model wants to call. @JsonKey(includeIfNull: false) - String? get quantize; - @override - - /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. 
- bool get stream; + ToolCallFunction? get function; @override @JsonKey(ignore: true) - _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => + _$$ToolCallImplCopyWith<_$ToolCallImpl> get copyWith => throw _privateConstructorUsedError; } -CreateModelResponse _$CreateModelResponseFromJson(Map json) { - return _CreateModelResponse.fromJson(json); +ToolCallFunction _$ToolCallFunctionFromJson(Map json) { + return _ToolCallFunction.fromJson(json); } /// @nodoc -mixin _$CreateModelResponse { - /// Status creating the model - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? get status => throw _privateConstructorUsedError; +mixin _$ToolCallFunction { + /// The name of the function to be called. + String get name => throw _privateConstructorUsedError; + + /// The arguments to pass to the function. + Map get arguments => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $CreateModelResponseCopyWith get copyWith => + $ToolCallFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateModelResponseCopyWith<$Res> { - factory $CreateModelResponseCopyWith( - CreateModelResponse value, $Res Function(CreateModelResponse) then) = - _$CreateModelResponseCopyWithImpl<$Res, CreateModelResponse>; +abstract class $ToolCallFunctionCopyWith<$Res> { + factory $ToolCallFunctionCopyWith( + ToolCallFunction value, $Res Function(ToolCallFunction) then) = + _$ToolCallFunctionCopyWithImpl<$Res, ToolCallFunction>; @useResult - $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? status}); + $Res call({String name, Map arguments}); } /// @nodoc -class _$CreateModelResponseCopyWithImpl<$Res, $Val extends CreateModelResponse> - implements $CreateModelResponseCopyWith<$Res> { - _$CreateModelResponseCopyWithImpl(this._value, this._then); +class _$ToolCallFunctionCopyWithImpl<$Res, $Val extends ToolCallFunction> + implements $ToolCallFunctionCopyWith<$Res> { + _$ToolCallFunctionCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -3965,157 +4152,1838 @@ class _$CreateModelResponseCopyWithImpl<$Res, $Val extends CreateModelResponse> @pragma('vm:prefer-inline') @override $Res call({ - Object? status = freezed, + Object? name = null, + Object? arguments = null, }) { return _then(_value.copyWith( - status: freezed == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as CreateModelStatus?, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as Map, ) as $Val); } } /// @nodoc -abstract class _$$CreateModelResponseImplCopyWith<$Res> - implements $CreateModelResponseCopyWith<$Res> { - factory _$$CreateModelResponseImplCopyWith(_$CreateModelResponseImpl value, - $Res Function(_$CreateModelResponseImpl) then) = - __$$CreateModelResponseImplCopyWithImpl<$Res>; +abstract class _$$ToolCallFunctionImplCopyWith<$Res> + implements $ToolCallFunctionCopyWith<$Res> { + factory _$$ToolCallFunctionImplCopyWith(_$ToolCallFunctionImpl value, + $Res Function(_$ToolCallFunctionImpl) then) = + __$$ToolCallFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? 
status}); + $Res call({String name, Map arguments}); } /// @nodoc -class __$$CreateModelResponseImplCopyWithImpl<$Res> - extends _$CreateModelResponseCopyWithImpl<$Res, _$CreateModelResponseImpl> - implements _$$CreateModelResponseImplCopyWith<$Res> { - __$$CreateModelResponseImplCopyWithImpl(_$CreateModelResponseImpl _value, - $Res Function(_$CreateModelResponseImpl) _then) +class __$$ToolCallFunctionImplCopyWithImpl<$Res> + extends _$ToolCallFunctionCopyWithImpl<$Res, _$ToolCallFunctionImpl> + implements _$$ToolCallFunctionImplCopyWith<$Res> { + __$$ToolCallFunctionImplCopyWithImpl(_$ToolCallFunctionImpl _value, + $Res Function(_$ToolCallFunctionImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? status = freezed, + Object? name = null, + Object? arguments = null, }) { - return _then(_$CreateModelResponseImpl( - status: freezed == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as CreateModelStatus?, + return _then(_$ToolCallFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value._arguments + : arguments // ignore: cast_nullable_to_non_nullable + as Map, )); } } /// @nodoc @JsonSerializable() -class _$CreateModelResponseImpl extends _CreateModelResponse { - const _$CreateModelResponseImpl( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.status}) - : super._(); +class _$ToolCallFunctionImpl extends _ToolCallFunction { + const _$ToolCallFunctionImpl( + {required this.name, required final Map arguments}) + : _arguments = arguments, + super._(); - factory _$CreateModelResponseImpl.fromJson(Map json) => - _$$CreateModelResponseImplFromJson(json); + factory _$ToolCallFunctionImpl.fromJson(Map json) => + _$$ToolCallFunctionImplFromJson(json); - /// Status creating the model + /// The name of the function to be called. @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final CreateModelStatus? status; + final String name; + + /// The arguments to pass to the function. + final Map _arguments; + + /// The arguments to pass to the function. 
+ @override + Map get arguments { + if (_arguments is EqualUnmodifiableMapView) return _arguments; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_arguments); + } @override String toString() { - return 'CreateModelResponse(status: $status)'; + return 'ToolCallFunction(name: $name, arguments: $arguments)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateModelResponseImpl && - (identical(other.status, status) || other.status == status)); + other is _$ToolCallFunctionImpl && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality() + .equals(other._arguments, _arguments)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, status); + int get hashCode => Object.hash( + runtimeType, name, const DeepCollectionEquality().hash(_arguments)); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => - __$$CreateModelResponseImplCopyWithImpl<_$CreateModelResponseImpl>( + _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => + __$$ToolCallFunctionImplCopyWithImpl<_$ToolCallFunctionImpl>( this, _$identity); @override Map toJson() { - return _$$CreateModelResponseImplToJson( + return _$$ToolCallFunctionImplToJson( this, ); } } -abstract class _CreateModelResponse extends CreateModelResponse { - const factory _CreateModelResponse( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final CreateModelStatus? status}) = _$CreateModelResponseImpl; - const _CreateModelResponse._() : super._(); +abstract class _ToolCallFunction extends ToolCallFunction { + const factory _ToolCallFunction( + {required final String name, + required final Map arguments}) = _$ToolCallFunctionImpl; + const _ToolCallFunction._() : super._(); - factory _CreateModelResponse.fromJson(Map json) = - _$CreateModelResponseImpl.fromJson; + factory _ToolCallFunction.fromJson(Map json) = + _$ToolCallFunctionImpl.fromJson; @override - /// Status creating the model - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - CreateModelStatus? get status; + /// The name of the function to be called. + String get name; + @override + + /// The arguments to pass to the function. + Map get arguments; @override @JsonKey(ignore: true) - _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => + _$$ToolCallFunctionImplCopyWith<_$ToolCallFunctionImpl> get copyWith => throw _privateConstructorUsedError; } -ModelsResponse _$ModelsResponseFromJson(Map json) { - return _ModelsResponse.fromJson(json); +GenerateEmbeddingRequest _$GenerateEmbeddingRequestFromJson( + Map json) { + return _GenerateEmbeddingRequest.fromJson(json); +} + +/// @nodoc +mixin _$GenerateEmbeddingRequest { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model => throw _privateConstructorUsedError; + + /// Text to generate embeddings for. + String get prompt => throw _privateConstructorUsedError; + + /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. + @JsonKey(includeIfNull: false) + RequestOptions? 
get options => throw _privateConstructorUsedError; + + /// How long (in minutes) to keep the model loaded in memory. + /// + /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. + /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. + /// - If set to 0, the model will be unloaded immediately once finished. + /// - If not set, the model will stay loaded for 5 minutes by default + @JsonKey(name: 'keep_alive', includeIfNull: false) + int? get keepAlive => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $GenerateEmbeddingRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $GenerateEmbeddingRequestCopyWith<$Res> { + factory $GenerateEmbeddingRequestCopyWith(GenerateEmbeddingRequest value, + $Res Function(GenerateEmbeddingRequest) then) = + _$GenerateEmbeddingRequestCopyWithImpl<$Res, GenerateEmbeddingRequest>; + @useResult + $Res call( + {String model, + String prompt, + @JsonKey(includeIfNull: false) RequestOptions? options, + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + + $RequestOptionsCopyWith<$Res>? get options; +} + +/// @nodoc +class _$GenerateEmbeddingRequestCopyWithImpl<$Res, + $Val extends GenerateEmbeddingRequest> + implements $GenerateEmbeddingRequestCopyWith<$Res> { + _$GenerateEmbeddingRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? prompt = null, + Object? options = freezed, + Object? keepAlive = freezed, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + prompt: null == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as String, + options: freezed == options + ? _value.options + : options // ignore: cast_nullable_to_non_nullable + as RequestOptions?, + keepAlive: freezed == keepAlive + ? _value.keepAlive + : keepAlive // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $RequestOptionsCopyWith<$Res>? get options { + if (_value.options == null) { + return null; + } + + return $RequestOptionsCopyWith<$Res>(_value.options!, (value) { + return _then(_value.copyWith(options: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$GenerateEmbeddingRequestImplCopyWith<$Res> + implements $GenerateEmbeddingRequestCopyWith<$Res> { + factory _$$GenerateEmbeddingRequestImplCopyWith( + _$GenerateEmbeddingRequestImpl value, + $Res Function(_$GenerateEmbeddingRequestImpl) then) = + __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String model, + String prompt, + @JsonKey(includeIfNull: false) RequestOptions? options, + @JsonKey(name: 'keep_alive', includeIfNull: false) int? keepAlive}); + + @override + $RequestOptionsCopyWith<$Res>? 
get options; +} + +/// @nodoc +class __$$GenerateEmbeddingRequestImplCopyWithImpl<$Res> + extends _$GenerateEmbeddingRequestCopyWithImpl<$Res, + _$GenerateEmbeddingRequestImpl> + implements _$$GenerateEmbeddingRequestImplCopyWith<$Res> { + __$$GenerateEmbeddingRequestImplCopyWithImpl( + _$GenerateEmbeddingRequestImpl _value, + $Res Function(_$GenerateEmbeddingRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? prompt = null, + Object? options = freezed, + Object? keepAlive = freezed, + }) { + return _then(_$GenerateEmbeddingRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + prompt: null == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as String, + options: freezed == options + ? _value.options + : options // ignore: cast_nullable_to_non_nullable + as RequestOptions?, + keepAlive: freezed == keepAlive + ? _value.keepAlive + : keepAlive // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$GenerateEmbeddingRequestImpl extends _GenerateEmbeddingRequest { + const _$GenerateEmbeddingRequestImpl( + {required this.model, + required this.prompt, + @JsonKey(includeIfNull: false) this.options, + @JsonKey(name: 'keep_alive', includeIfNull: false) this.keepAlive}) + : super._(); + + factory _$GenerateEmbeddingRequestImpl.fromJson(Map json) => + _$$GenerateEmbeddingRequestImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + final String model; + + /// Text to generate embeddings for. + @override + final String prompt; + + /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. + @override + @JsonKey(includeIfNull: false) + final RequestOptions? options; + + /// How long (in minutes) to keep the model loaded in memory. + /// + /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. + /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. + /// - If set to 0, the model will be unloaded immediately once finished. + /// - If not set, the model will stay loaded for 5 minutes by default + @override + @JsonKey(name: 'keep_alive', includeIfNull: false) + final int? 
keepAlive; + + @override + String toString() { + return 'GenerateEmbeddingRequest(model: $model, prompt: $prompt, options: $options, keepAlive: $keepAlive)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$GenerateEmbeddingRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.options, options) || other.options == options) && + (identical(other.keepAlive, keepAlive) || + other.keepAlive == keepAlive)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, model, prompt, options, keepAlive); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> + get copyWith => __$$GenerateEmbeddingRequestImplCopyWithImpl< + _$GenerateEmbeddingRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$GenerateEmbeddingRequestImplToJson( + this, + ); + } +} + +abstract class _GenerateEmbeddingRequest extends GenerateEmbeddingRequest { + const factory _GenerateEmbeddingRequest( + {required final String model, + required final String prompt, + @JsonKey(includeIfNull: false) final RequestOptions? options, + @JsonKey(name: 'keep_alive', includeIfNull: false) + final int? keepAlive}) = _$GenerateEmbeddingRequestImpl; + const _GenerateEmbeddingRequest._() : super._(); + + factory _GenerateEmbeddingRequest.fromJson(Map json) = + _$GenerateEmbeddingRequestImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model; + @override + + /// Text to generate embeddings for. + String get prompt; + @override + + /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`. + @JsonKey(includeIfNull: false) + RequestOptions? get options; + @override + + /// How long (in minutes) to keep the model loaded in memory. + /// + /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. + /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely. + /// - If set to 0, the model will be unloaded immediately once finished. + /// - If not set, the model will stay loaded for 5 minutes by default + @JsonKey(name: 'keep_alive', includeIfNull: false) + int? get keepAlive; + @override + @JsonKey(ignore: true) + _$$GenerateEmbeddingRequestImplCopyWith<_$GenerateEmbeddingRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +GenerateEmbeddingResponse _$GenerateEmbeddingResponseFromJson( + Map json) { + return _GenerateEmbeddingResponse.fromJson(json); +} + +/// @nodoc +mixin _$GenerateEmbeddingResponse { + /// The embedding for the prompt. + @JsonKey(includeIfNull: false) + List? 
get embedding => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $GenerateEmbeddingResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $GenerateEmbeddingResponseCopyWith<$Res> { + factory $GenerateEmbeddingResponseCopyWith(GenerateEmbeddingResponse value, + $Res Function(GenerateEmbeddingResponse) then) = + _$GenerateEmbeddingResponseCopyWithImpl<$Res, GenerateEmbeddingResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) List? embedding}); +} + +/// @nodoc +class _$GenerateEmbeddingResponseCopyWithImpl<$Res, + $Val extends GenerateEmbeddingResponse> + implements $GenerateEmbeddingResponseCopyWith<$Res> { + _$GenerateEmbeddingResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? embedding = freezed, + }) { + return _then(_value.copyWith( + embedding: freezed == embedding + ? _value.embedding + : embedding // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$GenerateEmbeddingResponseImplCopyWith<$Res> + implements $GenerateEmbeddingResponseCopyWith<$Res> { + factory _$$GenerateEmbeddingResponseImplCopyWith( + _$GenerateEmbeddingResponseImpl value, + $Res Function(_$GenerateEmbeddingResponseImpl) then) = + __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) List? embedding}); +} + +/// @nodoc +class __$$GenerateEmbeddingResponseImplCopyWithImpl<$Res> + extends _$GenerateEmbeddingResponseCopyWithImpl<$Res, + _$GenerateEmbeddingResponseImpl> + implements _$$GenerateEmbeddingResponseImplCopyWith<$Res> { + __$$GenerateEmbeddingResponseImplCopyWithImpl( + _$GenerateEmbeddingResponseImpl _value, + $Res Function(_$GenerateEmbeddingResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? embedding = freezed, + }) { + return _then(_$GenerateEmbeddingResponseImpl( + embedding: freezed == embedding + ? _value._embedding + : embedding // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$GenerateEmbeddingResponseImpl extends _GenerateEmbeddingResponse { + const _$GenerateEmbeddingResponseImpl( + {@JsonKey(includeIfNull: false) final List? embedding}) + : _embedding = embedding, + super._(); + + factory _$GenerateEmbeddingResponseImpl.fromJson(Map json) => + _$$GenerateEmbeddingResponseImplFromJson(json); + + /// The embedding for the prompt. + final List? _embedding; + + /// The embedding for the prompt. + @override + @JsonKey(includeIfNull: false) + List? 
get embedding { + final value = _embedding; + if (value == null) return null; + if (_embedding is EqualUnmodifiableListView) return _embedding; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'GenerateEmbeddingResponse(embedding: $embedding)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$GenerateEmbeddingResponseImpl && + const DeepCollectionEquality() + .equals(other._embedding, _embedding)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_embedding)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> + get copyWith => __$$GenerateEmbeddingResponseImplCopyWithImpl< + _$GenerateEmbeddingResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$GenerateEmbeddingResponseImplToJson( + this, + ); + } +} + +abstract class _GenerateEmbeddingResponse extends GenerateEmbeddingResponse { + const factory _GenerateEmbeddingResponse( + {@JsonKey(includeIfNull: false) final List? embedding}) = + _$GenerateEmbeddingResponseImpl; + const _GenerateEmbeddingResponse._() : super._(); + + factory _GenerateEmbeddingResponse.fromJson(Map json) = + _$GenerateEmbeddingResponseImpl.fromJson; + + @override + + /// The embedding for the prompt. + @JsonKey(includeIfNull: false) + List? get embedding; + @override + @JsonKey(ignore: true) + _$$GenerateEmbeddingResponseImplCopyWith<_$GenerateEmbeddingResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CreateModelRequest _$CreateModelRequestFromJson(Map json) { + return _CreateModelRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateModelRequest { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model => throw _privateConstructorUsedError; + + /// The contents of the Modelfile. + String get modelfile => throw _privateConstructorUsedError; + + /// Path to the Modelfile (optional) + @JsonKey(includeIfNull: false) + String? get path => throw _privateConstructorUsedError; + + /// The quantization level of the model. + @JsonKey(includeIfNull: false) + String? get quantize => throw _privateConstructorUsedError; + + /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. + bool get stream => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateModelRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateModelRequestCopyWith<$Res> { + factory $CreateModelRequestCopyWith( + CreateModelRequest value, $Res Function(CreateModelRequest) then) = + _$CreateModelRequestCopyWithImpl<$Res, CreateModelRequest>; + @useResult + $Res call( + {String model, + String modelfile, + @JsonKey(includeIfNull: false) String? path, + @JsonKey(includeIfNull: false) String? 
quantize, + bool stream}); +} + +/// @nodoc +class _$CreateModelRequestCopyWithImpl<$Res, $Val extends CreateModelRequest> + implements $CreateModelRequestCopyWith<$Res> { + _$CreateModelRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? modelfile = null, + Object? path = freezed, + Object? quantize = freezed, + Object? stream = null, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + modelfile: null == modelfile + ? _value.modelfile + : modelfile // ignore: cast_nullable_to_non_nullable + as String, + path: freezed == path + ? _value.path + : path // ignore: cast_nullable_to_non_nullable + as String?, + quantize: freezed == quantize + ? _value.quantize + : quantize // ignore: cast_nullable_to_non_nullable + as String?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateModelRequestImplCopyWith<$Res> + implements $CreateModelRequestCopyWith<$Res> { + factory _$$CreateModelRequestImplCopyWith(_$CreateModelRequestImpl value, + $Res Function(_$CreateModelRequestImpl) then) = + __$$CreateModelRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String model, + String modelfile, + @JsonKey(includeIfNull: false) String? path, + @JsonKey(includeIfNull: false) String? quantize, + bool stream}); +} + +/// @nodoc +class __$$CreateModelRequestImplCopyWithImpl<$Res> + extends _$CreateModelRequestCopyWithImpl<$Res, _$CreateModelRequestImpl> + implements _$$CreateModelRequestImplCopyWith<$Res> { + __$$CreateModelRequestImplCopyWithImpl(_$CreateModelRequestImpl _value, + $Res Function(_$CreateModelRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? modelfile = null, + Object? path = freezed, + Object? quantize = freezed, + Object? stream = null, + }) { + return _then(_$CreateModelRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + modelfile: null == modelfile + ? _value.modelfile + : modelfile // ignore: cast_nullable_to_non_nullable + as String, + path: freezed == path + ? _value.path + : path // ignore: cast_nullable_to_non_nullable + as String?, + quantize: freezed == quantize + ? _value.quantize + : quantize // ignore: cast_nullable_to_non_nullable + as String?, + stream: null == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateModelRequestImpl extends _CreateModelRequest { + const _$CreateModelRequestImpl( + {required this.model, + required this.modelfile, + @JsonKey(includeIfNull: false) this.path, + @JsonKey(includeIfNull: false) this.quantize, + this.stream = false}) + : super._(); + + factory _$CreateModelRequestImpl.fromJson(Map json) => + _$$CreateModelRequestImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + final String model; + + /// The contents of the Modelfile. 
+ @override + final String modelfile; + + /// Path to the Modelfile (optional) + @override + @JsonKey(includeIfNull: false) + final String? path; + + /// The quantization level of the model. + @override + @JsonKey(includeIfNull: false) + final String? quantize; + + /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. + @override + @JsonKey() + final bool stream; + + @override + String toString() { + return 'CreateModelRequest(model: $model, modelfile: $modelfile, path: $path, quantize: $quantize, stream: $stream)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateModelRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.modelfile, modelfile) || + other.modelfile == modelfile) && + (identical(other.path, path) || other.path == path) && + (identical(other.quantize, quantize) || + other.quantize == quantize) && + (identical(other.stream, stream) || other.stream == stream)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, model, modelfile, path, quantize, stream); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => + __$$CreateModelRequestImplCopyWithImpl<_$CreateModelRequestImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateModelRequestImplToJson( + this, + ); + } +} + +abstract class _CreateModelRequest extends CreateModelRequest { + const factory _CreateModelRequest( + {required final String model, + required final String modelfile, + @JsonKey(includeIfNull: false) final String? path, + @JsonKey(includeIfNull: false) final String? quantize, + final bool stream}) = _$CreateModelRequestImpl; + const _CreateModelRequest._() : super._(); + + factory _CreateModelRequest.fromJson(Map json) = + _$CreateModelRequestImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + String get model; + @override + + /// The contents of the Modelfile. + String get modelfile; + @override + + /// Path to the Modelfile (optional) + @JsonKey(includeIfNull: false) + String? get path; + @override + + /// The quantization level of the model. + @JsonKey(includeIfNull: false) + String? get quantize; + @override + + /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. + bool get stream; + @override + @JsonKey(ignore: true) + _$$CreateModelRequestImplCopyWith<_$CreateModelRequestImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateModelResponse _$CreateModelResponseFromJson(Map json) { + return _CreateModelResponse.fromJson(json); +} + +/// @nodoc +mixin _$CreateModelResponse { + /// Status creating the model + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? 
get status => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateModelResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateModelResponseCopyWith<$Res> { + factory $CreateModelResponseCopyWith( + CreateModelResponse value, $Res Function(CreateModelResponse) then) = + _$CreateModelResponseCopyWithImpl<$Res, CreateModelResponse>; + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? status}); +} + +/// @nodoc +class _$CreateModelResponseCopyWithImpl<$Res, $Val extends CreateModelResponse> + implements $CreateModelResponseCopyWith<$Res> { + _$CreateModelResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? status = freezed, + }) { + return _then(_value.copyWith( + status: freezed == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as CreateModelStatus?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateModelResponseImplCopyWith<$Res> + implements $CreateModelResponseCopyWith<$Res> { + factory _$$CreateModelResponseImplCopyWith(_$CreateModelResponseImpl value, + $Res Function(_$CreateModelResponseImpl) then) = + __$$CreateModelResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? status}); +} + +/// @nodoc +class __$$CreateModelResponseImplCopyWithImpl<$Res> + extends _$CreateModelResponseCopyWithImpl<$Res, _$CreateModelResponseImpl> + implements _$$CreateModelResponseImplCopyWith<$Res> { + __$$CreateModelResponseImplCopyWithImpl(_$CreateModelResponseImpl _value, + $Res Function(_$CreateModelResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? status = freezed, + }) { + return _then(_$CreateModelResponseImpl( + status: freezed == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as CreateModelStatus?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateModelResponseImpl extends _CreateModelResponse { + const _$CreateModelResponseImpl( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.status}) + : super._(); + + factory _$CreateModelResponseImpl.fromJson(Map json) => + _$$CreateModelResponseImplFromJson(json); + + /// Status creating the model + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateModelStatus? 
status; + + @override + String toString() { + return 'CreateModelResponse(status: $status)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateModelResponseImpl && + (identical(other.status, status) || other.status == status)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, status); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => + __$$CreateModelResponseImplCopyWithImpl<_$CreateModelResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateModelResponseImplToJson( + this, + ); + } +} + +abstract class _CreateModelResponse extends CreateModelResponse { + const factory _CreateModelResponse( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateModelStatus? status}) = _$CreateModelResponseImpl; + const _CreateModelResponse._() : super._(); + + factory _CreateModelResponse.fromJson(Map json) = + _$CreateModelResponseImpl.fromJson; + + @override + + /// Status creating the model + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateModelStatus? get status; + @override + @JsonKey(ignore: true) + _$$CreateModelResponseImplCopyWith<_$CreateModelResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ModelsResponse _$ModelsResponseFromJson(Map json) { + return _ModelsResponse.fromJson(json); } /// @nodoc mixin _$ModelsResponse { /// List of models available locally. @JsonKey(includeIfNull: false) - List? get models => throw _privateConstructorUsedError; + List? get models => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModelsResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelsResponseCopyWith<$Res> { + factory $ModelsResponseCopyWith( + ModelsResponse value, $Res Function(ModelsResponse) then) = + _$ModelsResponseCopyWithImpl<$Res, ModelsResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) List? models}); +} + +/// @nodoc +class _$ModelsResponseCopyWithImpl<$Res, $Val extends ModelsResponse> + implements $ModelsResponseCopyWith<$Res> { + _$ModelsResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? models = freezed, + }) { + return _then(_value.copyWith( + models: freezed == models + ? _value.models + : models // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModelsResponseImplCopyWith<$Res> + implements $ModelsResponseCopyWith<$Res> { + factory _$$ModelsResponseImplCopyWith(_$ModelsResponseImpl value, + $Res Function(_$ModelsResponseImpl) then) = + __$$ModelsResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({@JsonKey(includeIfNull: false) List? models}); +} + +/// @nodoc +class __$$ModelsResponseImplCopyWithImpl<$Res> + extends _$ModelsResponseCopyWithImpl<$Res, _$ModelsResponseImpl> + implements _$$ModelsResponseImplCopyWith<$Res> { + __$$ModelsResponseImplCopyWithImpl( + _$ModelsResponseImpl _value, $Res Function(_$ModelsResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? 
models = freezed, + }) { + return _then(_$ModelsResponseImpl( + models: freezed == models + ? _value._models + : models // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelsResponseImpl extends _ModelsResponse { + const _$ModelsResponseImpl( + {@JsonKey(includeIfNull: false) final List? models}) + : _models = models, + super._(); + + factory _$ModelsResponseImpl.fromJson(Map json) => + _$$ModelsResponseImplFromJson(json); + + /// List of models available locally. + final List? _models; + + /// List of models available locally. + @override + @JsonKey(includeIfNull: false) + List? get models { + final value = _models; + if (value == null) return null; + if (_models is EqualUnmodifiableListView) return _models; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'ModelsResponse(models: $models)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelsResponseImpl && + const DeepCollectionEquality().equals(other._models, _models)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => + __$$ModelsResponseImplCopyWithImpl<_$ModelsResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ModelsResponseImplToJson( + this, + ); + } +} + +abstract class _ModelsResponse extends ModelsResponse { + const factory _ModelsResponse( + {@JsonKey(includeIfNull: false) final List? models}) = + _$ModelsResponseImpl; + const _ModelsResponse._() : super._(); + + factory _ModelsResponse.fromJson(Map json) = + _$ModelsResponseImpl.fromJson; + + @override + + /// List of models available locally. + @JsonKey(includeIfNull: false) + List? get models; + @override + @JsonKey(ignore: true) + _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Model _$ModelFromJson(Map json) { + return _Model.fromJson(json); +} + +/// @nodoc +mixin _$Model { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// Model modification date. + @JsonKey(name: 'modified_at', includeIfNull: false) + String? get modifiedAt => throw _privateConstructorUsedError; + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size => throw _privateConstructorUsedError; + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest => throw _privateConstructorUsedError; + + /// Details about a model. + @JsonKey(includeIfNull: false) + ModelDetails? get details => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModelCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelCopyWith<$Res> { + factory $ModelCopyWith(Model value, $Res Function(Model) then) = + _$ModelCopyWithImpl<$Res, Model>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? 
model, + @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details}); + + $ModelDetailsCopyWith<$Res>? get details; +} + +/// @nodoc +class _$ModelCopyWithImpl<$Res, $Val extends Model> + implements $ModelCopyWith<$Res> { + _$ModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? modifiedAt = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + }) { + return _then(_value.copyWith( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + modifiedAt: freezed == modifiedAt + ? _value.modifiedAt + : modifiedAt // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModelDetailsCopyWith<$Res>? get details { + if (_value.details == null) { + return null; + } + + return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { + return _then(_value.copyWith(details: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ModelImplCopyWith<$Res> implements $ModelCopyWith<$Res> { + factory _$$ModelImplCopyWith( + _$ModelImpl value, $Res Function(_$ModelImpl) then) = + __$$ModelImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details}); + + @override + $ModelDetailsCopyWith<$Res>? get details; +} + +/// @nodoc +class __$$ModelImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelImpl> + implements _$$ModelImplCopyWith<$Res> { + __$$ModelImplCopyWithImpl( + _$ModelImpl _value, $Res Function(_$ModelImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? modifiedAt = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + }) { + return _then(_$ModelImpl( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + modifiedAt: freezed == modifiedAt + ? _value.modifiedAt + : modifiedAt // ignore: cast_nullable_to_non_nullable + as String?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable + as String?, + details: freezed == details + ? 
_value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelImpl extends _Model { + const _$ModelImpl( + {@JsonKey(includeIfNull: false) this.model, + @JsonKey(name: 'modified_at', includeIfNull: false) this.modifiedAt, + @JsonKey(includeIfNull: false) this.size, + @JsonKey(includeIfNull: false) this.digest, + @JsonKey(includeIfNull: false) this.details}) + : super._(); + + factory _$ModelImpl.fromJson(Map json) => + _$$ModelImplFromJson(json); + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// Model modification date. + @override + @JsonKey(name: 'modified_at', includeIfNull: false) + final String? modifiedAt; + + /// Size of the model on disk. + @override + @JsonKey(includeIfNull: false) + final int? size; + + /// The model's digest. + @override + @JsonKey(includeIfNull: false) + final String? digest; + + /// Details about a model. + @override + @JsonKey(includeIfNull: false) + final ModelDetails? details; + + @override + String toString() { + return 'Model(model: $model, modifiedAt: $modifiedAt, size: $size, digest: $digest, details: $details)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.modifiedAt, modifiedAt) || + other.modifiedAt == modifiedAt) && + (identical(other.size, size) || other.size == size) && + (identical(other.digest, digest) || other.digest == digest) && + (identical(other.details, details) || other.details == details)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, model, modifiedAt, size, digest, details); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelImplCopyWith<_$ModelImpl> get copyWith => + __$$ModelImplCopyWithImpl<_$ModelImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ModelImplToJson( + this, + ); + } +} + +abstract class _Model extends Model { + const factory _Model( + {@JsonKey(includeIfNull: false) final String? model, + @JsonKey(name: 'modified_at', includeIfNull: false) + final String? modifiedAt, + @JsonKey(includeIfNull: false) final int? size, + @JsonKey(includeIfNull: false) final String? digest, + @JsonKey(includeIfNull: false) final ModelDetails? details}) = + _$ModelImpl; + const _Model._() : super._(); + + factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; + + @override + + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model; + @override + + /// Model modification date. + @JsonKey(name: 'modified_at', includeIfNull: false) + String? get modifiedAt; + @override + + /// Size of the model on disk. + @JsonKey(includeIfNull: false) + int? get size; + @override + + /// The model's digest. + @JsonKey(includeIfNull: false) + String? get digest; + @override + + /// Details about a model. + @JsonKey(includeIfNull: false) + ModelDetails? 
get details; + @override + @JsonKey(ignore: true) + _$$ModelImplCopyWith<_$ModelImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ModelDetails _$ModelDetailsFromJson(Map json) { + return _ModelDetails.fromJson(json); +} + +/// @nodoc +mixin _$ModelDetails { + /// The parent model of the model. + @JsonKey(name: 'parent_model', includeIfNull: false) + String? get parentModel => throw _privateConstructorUsedError; + + /// The format of the model. + @JsonKey(includeIfNull: false) + String? get format => throw _privateConstructorUsedError; + + /// The family of the model. + @JsonKey(includeIfNull: false) + String? get family => throw _privateConstructorUsedError; + + /// The families of the model. + @JsonKey(includeIfNull: false) + List? get families => throw _privateConstructorUsedError; + + /// The size of the model's parameters. + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? get parameterSize => throw _privateConstructorUsedError; + + /// The quantization level of the model. + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? get quantizationLevel => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModelDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelDetailsCopyWith<$Res> { + factory $ModelDetailsCopyWith( + ModelDetails value, $Res Function(ModelDetails) then) = + _$ModelDetailsCopyWithImpl<$Res, ModelDetails>; + @useResult + $Res call( + {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, + @JsonKey(includeIfNull: false) String? format, + @JsonKey(includeIfNull: false) String? family, + @JsonKey(includeIfNull: false) List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? quantizationLevel}); +} + +/// @nodoc +class _$ModelDetailsCopyWithImpl<$Res, $Val extends ModelDetails> + implements $ModelDetailsCopyWith<$Res> { + _$ModelDetailsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? parentModel = freezed, + Object? format = freezed, + Object? family = freezed, + Object? families = freezed, + Object? parameterSize = freezed, + Object? quantizationLevel = freezed, + }) { + return _then(_value.copyWith( + parentModel: freezed == parentModel + ? _value.parentModel + : parentModel // ignore: cast_nullable_to_non_nullable + as String?, + format: freezed == format + ? _value.format + : format // ignore: cast_nullable_to_non_nullable + as String?, + family: freezed == family + ? _value.family + : family // ignore: cast_nullable_to_non_nullable + as String?, + families: freezed == families + ? _value.families + : families // ignore: cast_nullable_to_non_nullable + as List?, + parameterSize: freezed == parameterSize + ? _value.parameterSize + : parameterSize // ignore: cast_nullable_to_non_nullable + as String?, + quantizationLevel: freezed == quantizationLevel + ? 
_value.quantizationLevel + : quantizationLevel // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModelDetailsImplCopyWith<$Res> + implements $ModelDetailsCopyWith<$Res> { + factory _$$ModelDetailsImplCopyWith( + _$ModelDetailsImpl value, $Res Function(_$ModelDetailsImpl) then) = + __$$ModelDetailsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, + @JsonKey(includeIfNull: false) String? format, + @JsonKey(includeIfNull: false) String? family, + @JsonKey(includeIfNull: false) List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? quantizationLevel}); +} + +/// @nodoc +class __$$ModelDetailsImplCopyWithImpl<$Res> + extends _$ModelDetailsCopyWithImpl<$Res, _$ModelDetailsImpl> + implements _$$ModelDetailsImplCopyWith<$Res> { + __$$ModelDetailsImplCopyWithImpl( + _$ModelDetailsImpl _value, $Res Function(_$ModelDetailsImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? parentModel = freezed, + Object? format = freezed, + Object? family = freezed, + Object? families = freezed, + Object? parameterSize = freezed, + Object? quantizationLevel = freezed, + }) { + return _then(_$ModelDetailsImpl( + parentModel: freezed == parentModel + ? _value.parentModel + : parentModel // ignore: cast_nullable_to_non_nullable + as String?, + format: freezed == format + ? _value.format + : format // ignore: cast_nullable_to_non_nullable + as String?, + family: freezed == family + ? _value.family + : family // ignore: cast_nullable_to_non_nullable + as String?, + families: freezed == families + ? _value._families + : families // ignore: cast_nullable_to_non_nullable + as List?, + parameterSize: freezed == parameterSize + ? _value.parameterSize + : parameterSize // ignore: cast_nullable_to_non_nullable + as String?, + quantizationLevel: freezed == quantizationLevel + ? _value.quantizationLevel + : quantizationLevel // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelDetailsImpl extends _ModelDetails { + const _$ModelDetailsImpl( + {@JsonKey(name: 'parent_model', includeIfNull: false) this.parentModel, + @JsonKey(includeIfNull: false) this.format, + @JsonKey(includeIfNull: false) this.family, + @JsonKey(includeIfNull: false) final List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) this.parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + this.quantizationLevel}) + : _families = families, + super._(); + + factory _$ModelDetailsImpl.fromJson(Map json) => + _$$ModelDetailsImplFromJson(json); + + /// The parent model of the model. + @override + @JsonKey(name: 'parent_model', includeIfNull: false) + final String? parentModel; + + /// The format of the model. + @override + @JsonKey(includeIfNull: false) + final String? format; + + /// The family of the model. + @override + @JsonKey(includeIfNull: false) + final String? family; + + /// The families of the model. + final List? _families; + + /// The families of the model. + @override + @JsonKey(includeIfNull: false) + List? 
get families { + final value = _families; + if (value == null) return null; + if (_families is EqualUnmodifiableListView) return _families; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// The size of the model's parameters. + @override + @JsonKey(name: 'parameter_size', includeIfNull: false) + final String? parameterSize; + + /// The quantization level of the model. + @override + @JsonKey(name: 'quantization_level', includeIfNull: false) + final String? quantizationLevel; + + @override + String toString() { + return 'ModelDetails(parentModel: $parentModel, format: $format, family: $family, families: $families, parameterSize: $parameterSize, quantizationLevel: $quantizationLevel)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelDetailsImpl && + (identical(other.parentModel, parentModel) || + other.parentModel == parentModel) && + (identical(other.format, format) || other.format == format) && + (identical(other.family, family) || other.family == family) && + const DeepCollectionEquality().equals(other._families, _families) && + (identical(other.parameterSize, parameterSize) || + other.parameterSize == parameterSize) && + (identical(other.quantizationLevel, quantizationLevel) || + other.quantizationLevel == quantizationLevel)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + parentModel, + format, + family, + const DeepCollectionEquality().hash(_families), + parameterSize, + quantizationLevel); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => + __$$ModelDetailsImplCopyWithImpl<_$ModelDetailsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ModelDetailsImplToJson( + this, + ); + } +} + +abstract class _ModelDetails extends ModelDetails { + const factory _ModelDetails( + {@JsonKey(name: 'parent_model', includeIfNull: false) + final String? parentModel, + @JsonKey(includeIfNull: false) final String? format, + @JsonKey(includeIfNull: false) final String? family, + @JsonKey(includeIfNull: false) final List? families, + @JsonKey(name: 'parameter_size', includeIfNull: false) + final String? parameterSize, + @JsonKey(name: 'quantization_level', includeIfNull: false) + final String? quantizationLevel}) = _$ModelDetailsImpl; + const _ModelDetails._() : super._(); + + factory _ModelDetails.fromJson(Map json) = + _$ModelDetailsImpl.fromJson; + + @override + + /// The parent model of the model. + @JsonKey(name: 'parent_model', includeIfNull: false) + String? get parentModel; + @override + + /// The format of the model. + @JsonKey(includeIfNull: false) + String? get format; + @override + + /// The family of the model. + @JsonKey(includeIfNull: false) + String? get family; + @override + + /// The families of the model. + @JsonKey(includeIfNull: false) + List? get families; + @override + + /// The size of the model's parameters. + @JsonKey(name: 'parameter_size', includeIfNull: false) + String? get parameterSize; + @override + + /// The quantization level of the model. + @JsonKey(name: 'quantization_level', includeIfNull: false) + String? 
get quantizationLevel; + @override + @JsonKey(ignore: true) + _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ModelInformation _$ModelInformationFromJson(Map json) { + return _ModelInformation.fromJson(json); +} + +/// @nodoc +mixin _$ModelInformation { + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? get generalArchitecture => throw _privateConstructorUsedError; + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? get generalFileType => throw _privateConstructorUsedError; + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? get generalParameterCount => throw _privateConstructorUsedError; + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? get generalQuantizationVersion => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ModelsResponseCopyWith get copyWith => + $ModelInformationCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ModelsResponseCopyWith<$Res> { - factory $ModelsResponseCopyWith( - ModelsResponse value, $Res Function(ModelsResponse) then) = - _$ModelsResponseCopyWithImpl<$Res, ModelsResponse>; +abstract class $ModelInformationCopyWith<$Res> { + factory $ModelInformationCopyWith( + ModelInformation value, $Res Function(ModelInformation) then) = + _$ModelInformationCopyWithImpl<$Res, ModelInformation>; @useResult - $Res call({@JsonKey(includeIfNull: false) List? models}); + $Res call( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion}); } /// @nodoc -class _$ModelsResponseCopyWithImpl<$Res, $Val extends ModelsResponse> - implements $ModelsResponseCopyWith<$Res> { - _$ModelsResponseCopyWithImpl(this._value, this._then); +class _$ModelInformationCopyWithImpl<$Res, $Val extends ModelInformation> + implements $ModelInformationCopyWith<$Res> { + _$ModelInformationCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -4125,473 +5993,427 @@ class _$ModelsResponseCopyWithImpl<$Res, $Val extends ModelsResponse> @pragma('vm:prefer-inline') @override $Res call({ - Object? models = freezed, + Object? generalArchitecture = freezed, + Object? generalFileType = freezed, + Object? generalParameterCount = freezed, + Object? generalQuantizationVersion = freezed, }) { return _then(_value.copyWith( - models: freezed == models - ? _value.models - : models // ignore: cast_nullable_to_non_nullable - as List?, + generalArchitecture: freezed == generalArchitecture + ? _value.generalArchitecture + : generalArchitecture // ignore: cast_nullable_to_non_nullable + as String?, + generalFileType: freezed == generalFileType + ? _value.generalFileType + : generalFileType // ignore: cast_nullable_to_non_nullable + as int?, + generalParameterCount: freezed == generalParameterCount + ? 
_value.generalParameterCount + : generalParameterCount // ignore: cast_nullable_to_non_nullable + as int?, + generalQuantizationVersion: freezed == generalQuantizationVersion + ? _value.generalQuantizationVersion + : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable + as int?, ) as $Val); } } /// @nodoc -abstract class _$$ModelsResponseImplCopyWith<$Res> - implements $ModelsResponseCopyWith<$Res> { - factory _$$ModelsResponseImplCopyWith(_$ModelsResponseImpl value, - $Res Function(_$ModelsResponseImpl) then) = - __$$ModelsResponseImplCopyWithImpl<$Res>; +abstract class _$$ModelInformationImplCopyWith<$Res> + implements $ModelInformationCopyWith<$Res> { + factory _$$ModelInformationImplCopyWith(_$ModelInformationImpl value, + $Res Function(_$ModelInformationImpl) then) = + __$$ModelInformationImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(includeIfNull: false) List? models}); + $Res call( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? generalQuantizationVersion}); } /// @nodoc -class __$$ModelsResponseImplCopyWithImpl<$Res> - extends _$ModelsResponseCopyWithImpl<$Res, _$ModelsResponseImpl> - implements _$$ModelsResponseImplCopyWith<$Res> { - __$$ModelsResponseImplCopyWithImpl( - _$ModelsResponseImpl _value, $Res Function(_$ModelsResponseImpl) _then) +class __$$ModelInformationImplCopyWithImpl<$Res> + extends _$ModelInformationCopyWithImpl<$Res, _$ModelInformationImpl> + implements _$$ModelInformationImplCopyWith<$Res> { + __$$ModelInformationImplCopyWithImpl(_$ModelInformationImpl _value, + $Res Function(_$ModelInformationImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? models = freezed, + Object? generalArchitecture = freezed, + Object? generalFileType = freezed, + Object? generalParameterCount = freezed, + Object? generalQuantizationVersion = freezed, }) { - return _then(_$ModelsResponseImpl( - models: freezed == models - ? _value._models - : models // ignore: cast_nullable_to_non_nullable - as List?, + return _then(_$ModelInformationImpl( + generalArchitecture: freezed == generalArchitecture + ? _value.generalArchitecture + : generalArchitecture // ignore: cast_nullable_to_non_nullable + as String?, + generalFileType: freezed == generalFileType + ? _value.generalFileType + : generalFileType // ignore: cast_nullable_to_non_nullable + as int?, + generalParameterCount: freezed == generalParameterCount + ? _value.generalParameterCount + : generalParameterCount // ignore: cast_nullable_to_non_nullable + as int?, + generalQuantizationVersion: freezed == generalQuantizationVersion + ? _value.generalQuantizationVersion + : generalQuantizationVersion // ignore: cast_nullable_to_non_nullable + as int?, )); } } /// @nodoc @JsonSerializable() -class _$ModelsResponseImpl extends _ModelsResponse { - const _$ModelsResponseImpl( - {@JsonKey(includeIfNull: false) final List? 
models}) - : _models = models, - super._(); +class _$ModelInformationImpl extends _ModelInformation { + const _$ModelInformationImpl( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + this.generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + this.generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + this.generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + this.generalQuantizationVersion}) + : super._(); - factory _$ModelsResponseImpl.fromJson(Map json) => - _$$ModelsResponseImplFromJson(json); + factory _$ModelInformationImpl.fromJson(Map json) => + _$$ModelInformationImplFromJson(json); - /// List of models available locally. - final List? _models; + /// The architecture of the model. + @override + @JsonKey(name: 'general.architecture', includeIfNull: false) + final String? generalArchitecture; - /// List of models available locally. + /// The file type of the model. @override - @JsonKey(includeIfNull: false) - List? get models { - final value = _models; - if (value == null) return null; - if (_models is EqualUnmodifiableListView) return _models; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + @JsonKey(name: 'general.file_type', includeIfNull: false) + final int? generalFileType; + + /// The number of parameters in the model. + @override + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + final int? generalParameterCount; + + /// The number of parameters in the model. + @override + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + final int? generalQuantizationVersion; @override String toString() { - return 'ModelsResponse(models: $models)'; + return 'ModelInformation(generalArchitecture: $generalArchitecture, generalFileType: $generalFileType, generalParameterCount: $generalParameterCount, generalQuantizationVersion: $generalQuantizationVersion)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelsResponseImpl && - const DeepCollectionEquality().equals(other._models, _models)); + other is _$ModelInformationImpl && + (identical(other.generalArchitecture, generalArchitecture) || + other.generalArchitecture == generalArchitecture) && + (identical(other.generalFileType, generalFileType) || + other.generalFileType == generalFileType) && + (identical(other.generalParameterCount, generalParameterCount) || + other.generalParameterCount == generalParameterCount) && + (identical(other.generalQuantizationVersion, + generalQuantizationVersion) || + other.generalQuantizationVersion == + generalQuantizationVersion)); } @JsonKey(ignore: true) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); + int get hashCode => Object.hash(runtimeType, generalArchitecture, + generalFileType, generalParameterCount, generalQuantizationVersion); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => - __$$ModelsResponseImplCopyWithImpl<_$ModelsResponseImpl>( + _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => + __$$ModelInformationImplCopyWithImpl<_$ModelInformationImpl>( this, _$identity); @override Map toJson() { - return _$$ModelsResponseImplToJson( + return _$$ModelInformationImplToJson( this, ); } } -abstract class _ModelsResponse extends ModelsResponse { - const factory _ModelsResponse( - 
{@JsonKey(includeIfNull: false) final List? models}) = - _$ModelsResponseImpl; - const _ModelsResponse._() : super._(); +abstract class _ModelInformation extends ModelInformation { + const factory _ModelInformation( + {@JsonKey(name: 'general.architecture', includeIfNull: false) + final String? generalArchitecture, + @JsonKey(name: 'general.file_type', includeIfNull: false) + final int? generalFileType, + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + final int? generalParameterCount, + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + final int? generalQuantizationVersion}) = _$ModelInformationImpl; + const _ModelInformation._() : super._(); - factory _ModelsResponse.fromJson(Map json) = - _$ModelsResponseImpl.fromJson; + factory _ModelInformation.fromJson(Map json) = + _$ModelInformationImpl.fromJson; @override - /// List of models available locally. - @JsonKey(includeIfNull: false) - List? get models; + /// The architecture of the model. + @JsonKey(name: 'general.architecture', includeIfNull: false) + String? get generalArchitecture; + @override + + /// The file type of the model. + @JsonKey(name: 'general.file_type', includeIfNull: false) + int? get generalFileType; + @override + + /// The number of parameters in the model. + @JsonKey(name: 'general.parameter_count', includeIfNull: false) + int? get generalParameterCount; + @override + + /// The number of parameters in the model. + @JsonKey(name: 'general.quantization_version', includeIfNull: false) + int? get generalQuantizationVersion; @override @JsonKey(ignore: true) - _$$ModelsResponseImplCopyWith<_$ModelsResponseImpl> get copyWith => + _$$ModelInformationImplCopyWith<_$ModelInformationImpl> get copyWith => throw _privateConstructorUsedError; } -Model _$ModelFromJson(Map json) { - return _Model.fromJson(json); +ProcessResponse _$ProcessResponseFromJson(Map json) { + return _ProcessResponse.fromJson(json); } /// @nodoc -mixin _$Model { - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; - - /// Model modification date. - @JsonKey(name: 'modified_at', includeIfNull: false) - String? get modifiedAt => throw _privateConstructorUsedError; - - /// Size of the model on disk. - @JsonKey(includeIfNull: false) - int? get size => throw _privateConstructorUsedError; - - /// The model's digest. - @JsonKey(includeIfNull: false) - String? get digest => throw _privateConstructorUsedError; - - /// Details about a model. +mixin _$ProcessResponse { + /// List of running models. @JsonKey(includeIfNull: false) - ModelDetails? get details => throw _privateConstructorUsedError; + List? get models => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ModelCopyWith get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ModelCopyWith<$Res> { - factory $ModelCopyWith(Model value, $Res Function(Model) then) = - _$ModelCopyWithImpl<$Res, Model>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, - @JsonKey(includeIfNull: false) int? size, - @JsonKey(includeIfNull: false) String? digest, - @JsonKey(includeIfNull: false) ModelDetails? 
details}); - - $ModelDetailsCopyWith<$Res>? get details; + $ProcessResponseCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -class _$ModelCopyWithImpl<$Res, $Val extends Model> - implements $ModelCopyWith<$Res> { - _$ModelCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? model = freezed, - Object? modifiedAt = freezed, - Object? size = freezed, - Object? digest = freezed, - Object? details = freezed, - }) { - return _then(_value.copyWith( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - modifiedAt: freezed == modifiedAt - ? _value.modifiedAt - : modifiedAt // ignore: cast_nullable_to_non_nullable - as String?, - size: freezed == size - ? _value.size - : size // ignore: cast_nullable_to_non_nullable - as int?, - digest: freezed == digest - ? _value.digest - : digest // ignore: cast_nullable_to_non_nullable - as String?, - details: freezed == details - ? _value.details - : details // ignore: cast_nullable_to_non_nullable - as ModelDetails?, - ) as $Val); - } +abstract class $ProcessResponseCopyWith<$Res> { + factory $ProcessResponseCopyWith( + ProcessResponse value, $Res Function(ProcessResponse) then) = + _$ProcessResponseCopyWithImpl<$Res, ProcessResponse>; + @useResult + $Res call({@JsonKey(includeIfNull: false) List? models}); +} - @override - @pragma('vm:prefer-inline') - $ModelDetailsCopyWith<$Res>? get details { - if (_value.details == null) { - return null; - } +/// @nodoc +class _$ProcessResponseCopyWithImpl<$Res, $Val extends ProcessResponse> + implements $ProcessResponseCopyWith<$Res> { + _$ProcessResponseCopyWithImpl(this._value, this._then); - return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { - return _then(_value.copyWith(details: value) as $Val); - }); + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? models = freezed, + }) { + return _then(_value.copyWith( + models: freezed == models + ? _value.models + : models // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); } } /// @nodoc -abstract class _$$ModelImplCopyWith<$Res> implements $ModelCopyWith<$Res> { - factory _$$ModelImplCopyWith( - _$ModelImpl value, $Res Function(_$ModelImpl) then) = - __$$ModelImplCopyWithImpl<$Res>; +abstract class _$$ProcessResponseImplCopyWith<$Res> + implements $ProcessResponseCopyWith<$Res> { + factory _$$ProcessResponseImplCopyWith(_$ProcessResponseImpl value, + $Res Function(_$ProcessResponseImpl) then) = + __$$ProcessResponseImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(name: 'modified_at', includeIfNull: false) String? modifiedAt, - @JsonKey(includeIfNull: false) int? size, - @JsonKey(includeIfNull: false) String? digest, - @JsonKey(includeIfNull: false) ModelDetails? details}); - - @override - $ModelDetailsCopyWith<$Res>? get details; + $Res call({@JsonKey(includeIfNull: false) List? 
models}); } /// @nodoc -class __$$ModelImplCopyWithImpl<$Res> - extends _$ModelCopyWithImpl<$Res, _$ModelImpl> - implements _$$ModelImplCopyWith<$Res> { - __$$ModelImplCopyWithImpl( - _$ModelImpl _value, $Res Function(_$ModelImpl) _then) +class __$$ProcessResponseImplCopyWithImpl<$Res> + extends _$ProcessResponseCopyWithImpl<$Res, _$ProcessResponseImpl> + implements _$$ProcessResponseImplCopyWith<$Res> { + __$$ProcessResponseImplCopyWithImpl( + _$ProcessResponseImpl _value, $Res Function(_$ProcessResponseImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? model = freezed, - Object? modifiedAt = freezed, - Object? size = freezed, - Object? digest = freezed, - Object? details = freezed, + Object? models = freezed, }) { - return _then(_$ModelImpl( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - modifiedAt: freezed == modifiedAt - ? _value.modifiedAt - : modifiedAt // ignore: cast_nullable_to_non_nullable - as String?, - size: freezed == size - ? _value.size - : size // ignore: cast_nullable_to_non_nullable - as int?, - digest: freezed == digest - ? _value.digest - : digest // ignore: cast_nullable_to_non_nullable - as String?, - details: freezed == details - ? _value.details - : details // ignore: cast_nullable_to_non_nullable - as ModelDetails?, + return _then(_$ProcessResponseImpl( + models: freezed == models + ? _value._models + : models // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$ModelImpl extends _Model { - const _$ModelImpl( - {@JsonKey(includeIfNull: false) this.model, - @JsonKey(name: 'modified_at', includeIfNull: false) this.modifiedAt, - @JsonKey(includeIfNull: false) this.size, - @JsonKey(includeIfNull: false) this.digest, - @JsonKey(includeIfNull: false) this.details}) - : super._(); - - factory _$ModelImpl.fromJson(Map json) => - _$$ModelImplFromJson(json); - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @override - @JsonKey(includeIfNull: false) - final String? model; - - /// Model modification date. - @override - @JsonKey(name: 'modified_at', includeIfNull: false) - final String? modifiedAt; +class _$ProcessResponseImpl extends _ProcessResponse { + const _$ProcessResponseImpl( + {@JsonKey(includeIfNull: false) final List? models}) + : _models = models, + super._(); - /// Size of the model on disk. - @override - @JsonKey(includeIfNull: false) - final int? size; + factory _$ProcessResponseImpl.fromJson(Map json) => + _$$ProcessResponseImplFromJson(json); - /// The model's digest. - @override - @JsonKey(includeIfNull: false) - final String? digest; + /// List of running models. + final List? _models; - /// Details about a model. + /// List of running models. @override @JsonKey(includeIfNull: false) - final ModelDetails? details; + List? 
get models { + final value = _models; + if (value == null) return null; + if (_models is EqualUnmodifiableListView) return _models; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } @override String toString() { - return 'Model(model: $model, modifiedAt: $modifiedAt, size: $size, digest: $digest, details: $details)'; + return 'ProcessResponse(models: $models)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.modifiedAt, modifiedAt) || - other.modifiedAt == modifiedAt) && - (identical(other.size, size) || other.size == size) && - (identical(other.digest, digest) || other.digest == digest) && - (identical(other.details, details) || other.details == details)); + other is _$ProcessResponseImpl && + const DeepCollectionEquality().equals(other._models, _models)); } @JsonKey(ignore: true) @override int get hashCode => - Object.hash(runtimeType, model, modifiedAt, size, digest, details); + Object.hash(runtimeType, const DeepCollectionEquality().hash(_models)); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelImplCopyWith<_$ModelImpl> get copyWith => - __$$ModelImplCopyWithImpl<_$ModelImpl>(this, _$identity); + _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => + __$$ProcessResponseImplCopyWithImpl<_$ProcessResponseImpl>( + this, _$identity); @override Map toJson() { - return _$$ModelImplToJson( + return _$$ProcessResponseImplToJson( this, ); } } -abstract class _Model extends Model { - const factory _Model( - {@JsonKey(includeIfNull: false) final String? model, - @JsonKey(name: 'modified_at', includeIfNull: false) - final String? modifiedAt, - @JsonKey(includeIfNull: false) final int? size, - @JsonKey(includeIfNull: false) final String? digest, - @JsonKey(includeIfNull: false) final ModelDetails? details}) = - _$ModelImpl; - const _Model._() : super._(); - - factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; - - @override - - /// The model name. - /// - /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - @JsonKey(includeIfNull: false) - String? get model; - @override +abstract class _ProcessResponse extends ProcessResponse { + const factory _ProcessResponse( + {@JsonKey(includeIfNull: false) final List? models}) = + _$ProcessResponseImpl; + const _ProcessResponse._() : super._(); - /// Model modification date. - @JsonKey(name: 'modified_at', includeIfNull: false) - String? get modifiedAt; - @override - - /// Size of the model on disk. - @JsonKey(includeIfNull: false) - int? get size; - @override + factory _ProcessResponse.fromJson(Map json) = + _$ProcessResponseImpl.fromJson; - /// The model's digest. - @JsonKey(includeIfNull: false) - String? get digest; @override - /// Details about a model. + /// List of running models. @JsonKey(includeIfNull: false) - ModelDetails? get details; + List? 
get models; @override @JsonKey(ignore: true) - _$$ModelImplCopyWith<_$ModelImpl> get copyWith => + _$$ProcessResponseImplCopyWith<_$ProcessResponseImpl> get copyWith => throw _privateConstructorUsedError; } -ModelDetails _$ModelDetailsFromJson(Map json) { - return _ModelDetails.fromJson(json); +ProcessModel _$ProcessModelFromJson(Map json) { + return _ProcessModel.fromJson(json); } /// @nodoc -mixin _$ModelDetails { - /// The parent model of the model. - @JsonKey(name: 'parent_model', includeIfNull: false) - String? get parentModel => throw _privateConstructorUsedError; +mixin _$ProcessModel { + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; - /// The format of the model. + /// Size of the model on disk. @JsonKey(includeIfNull: false) - String? get format => throw _privateConstructorUsedError; + int? get size => throw _privateConstructorUsedError; - /// The family of the model. + /// The model's digest. @JsonKey(includeIfNull: false) - String? get family => throw _privateConstructorUsedError; + String? get digest => throw _privateConstructorUsedError; - /// The families of the model. + /// Details about a model. @JsonKey(includeIfNull: false) - List? get families => throw _privateConstructorUsedError; + ModelDetails? get details => throw _privateConstructorUsedError; - /// The size of the model's parameters. - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? get parameterSize => throw _privateConstructorUsedError; + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) + String? get expiresAt => throw _privateConstructorUsedError; - /// The quantization level of the model. - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? get quantizationLevel => throw _privateConstructorUsedError; + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) + int? get sizeVram => throw _privateConstructorUsedError; Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) - $ModelDetailsCopyWith get copyWith => + $ProcessModelCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ModelDetailsCopyWith<$Res> { - factory $ModelDetailsCopyWith( - ModelDetails value, $Res Function(ModelDetails) then) = - _$ModelDetailsCopyWithImpl<$Res, ModelDetails>; +abstract class $ProcessModelCopyWith<$Res> { + factory $ProcessModelCopyWith( + ProcessModel value, $Res Function(ProcessModel) then) = + _$ProcessModelCopyWithImpl<$Res, ProcessModel>; @useResult $Res call( - {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, - @JsonKey(includeIfNull: false) String? format, - @JsonKey(includeIfNull: false) String? family, - @JsonKey(includeIfNull: false) List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? quantizationLevel}); + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) int? 
sizeVram}); + + $ModelDetailsCopyWith<$Res>? get details; } /// @nodoc -class _$ModelDetailsCopyWithImpl<$Res, $Val extends ModelDetails> - implements $ModelDetailsCopyWith<$Res> { - _$ModelDetailsCopyWithImpl(this._value, this._then); +class _$ProcessModelCopyWithImpl<$Res, $Val extends ProcessModel> + implements $ProcessModelCopyWith<$Res> { + _$ProcessModelCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; @@ -4601,259 +6423,257 @@ class _$ModelDetailsCopyWithImpl<$Res, $Val extends ModelDetails> @pragma('vm:prefer-inline') @override $Res call({ - Object? parentModel = freezed, - Object? format = freezed, - Object? family = freezed, - Object? families = freezed, - Object? parameterSize = freezed, - Object? quantizationLevel = freezed, + Object? model = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + Object? expiresAt = freezed, + Object? sizeVram = freezed, }) { return _then(_value.copyWith( - parentModel: freezed == parentModel - ? _value.parentModel - : parentModel // ignore: cast_nullable_to_non_nullable - as String?, - format: freezed == format - ? _value.format - : format // ignore: cast_nullable_to_non_nullable - as String?, - family: freezed == family - ? _value.family - : family // ignore: cast_nullable_to_non_nullable + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable as String?, - families: freezed == families - ? _value.families - : families // ignore: cast_nullable_to_non_nullable - as List?, - parameterSize: freezed == parameterSize - ? _value.parameterSize - : parameterSize // ignore: cast_nullable_to_non_nullable + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable as String?, - quantizationLevel: freezed == quantizationLevel - ? _value.quantizationLevel - : quantizationLevel // ignore: cast_nullable_to_non_nullable + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable as String?, + sizeVram: freezed == sizeVram + ? _value.sizeVram + : sizeVram // ignore: cast_nullable_to_non_nullable + as int?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ModelDetailsCopyWith<$Res>? get details { + if (_value.details == null) { + return null; + } + + return $ModelDetailsCopyWith<$Res>(_value.details!, (value) { + return _then(_value.copyWith(details: value) as $Val); + }); + } } /// @nodoc -abstract class _$$ModelDetailsImplCopyWith<$Res> - implements $ModelDetailsCopyWith<$Res> { - factory _$$ModelDetailsImplCopyWith( - _$ModelDetailsImpl value, $Res Function(_$ModelDetailsImpl) then) = - __$$ModelDetailsImplCopyWithImpl<$Res>; +abstract class _$$ProcessModelImplCopyWith<$Res> + implements $ProcessModelCopyWith<$Res> { + factory _$$ProcessModelImplCopyWith( + _$ProcessModelImpl value, $Res Function(_$ProcessModelImpl) then) = + __$$ProcessModelImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'parent_model', includeIfNull: false) String? parentModel, - @JsonKey(includeIfNull: false) String? format, - @JsonKey(includeIfNull: false) String? family, - @JsonKey(includeIfNull: false) List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? 
parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? quantizationLevel}); + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) int? size, + @JsonKey(includeIfNull: false) String? digest, + @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) int? sizeVram}); + + @override + $ModelDetailsCopyWith<$Res>? get details; } /// @nodoc -class __$$ModelDetailsImplCopyWithImpl<$Res> - extends _$ModelDetailsCopyWithImpl<$Res, _$ModelDetailsImpl> - implements _$$ModelDetailsImplCopyWith<$Res> { - __$$ModelDetailsImplCopyWithImpl( - _$ModelDetailsImpl _value, $Res Function(_$ModelDetailsImpl) _then) +class __$$ProcessModelImplCopyWithImpl<$Res> + extends _$ProcessModelCopyWithImpl<$Res, _$ProcessModelImpl> + implements _$$ProcessModelImplCopyWith<$Res> { + __$$ProcessModelImplCopyWithImpl( + _$ProcessModelImpl _value, $Res Function(_$ProcessModelImpl) _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? parentModel = freezed, - Object? format = freezed, - Object? family = freezed, - Object? families = freezed, - Object? parameterSize = freezed, - Object? quantizationLevel = freezed, + Object? model = freezed, + Object? size = freezed, + Object? digest = freezed, + Object? details = freezed, + Object? expiresAt = freezed, + Object? sizeVram = freezed, }) { - return _then(_$ModelDetailsImpl( - parentModel: freezed == parentModel - ? _value.parentModel - : parentModel // ignore: cast_nullable_to_non_nullable - as String?, - format: freezed == format - ? _value.format - : format // ignore: cast_nullable_to_non_nullable - as String?, - family: freezed == family - ? _value.family - : family // ignore: cast_nullable_to_non_nullable + return _then(_$ProcessModelImpl( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable as String?, - families: freezed == families - ? _value._families - : families // ignore: cast_nullable_to_non_nullable - as List?, - parameterSize: freezed == parameterSize - ? _value.parameterSize - : parameterSize // ignore: cast_nullable_to_non_nullable + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as int?, + digest: freezed == digest + ? _value.digest + : digest // ignore: cast_nullable_to_non_nullable as String?, - quantizationLevel: freezed == quantizationLevel - ? _value.quantizationLevel - : quantizationLevel // ignore: cast_nullable_to_non_nullable + details: freezed == details + ? _value.details + : details // ignore: cast_nullable_to_non_nullable + as ModelDetails?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable as String?, + sizeVram: freezed == sizeVram + ? _value.sizeVram + : sizeVram // ignore: cast_nullable_to_non_nullable + as int?, )); } } /// @nodoc @JsonSerializable() -class _$ModelDetailsImpl extends _ModelDetails { - const _$ModelDetailsImpl( - {@JsonKey(name: 'parent_model', includeIfNull: false) this.parentModel, - @JsonKey(includeIfNull: false) this.format, - @JsonKey(includeIfNull: false) this.family, - @JsonKey(includeIfNull: false) final List? 
families, - @JsonKey(name: 'parameter_size', includeIfNull: false) this.parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - this.quantizationLevel}) - : _families = families, - super._(); +class _$ProcessModelImpl extends _ProcessModel { + const _$ProcessModelImpl( + {@JsonKey(includeIfNull: false) this.model, + @JsonKey(includeIfNull: false) this.size, + @JsonKey(includeIfNull: false) this.digest, + @JsonKey(includeIfNull: false) this.details, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) this.sizeVram}) + : super._(); - factory _$ModelDetailsImpl.fromJson(Map json) => - _$$ModelDetailsImplFromJson(json); + factory _$ProcessModelImpl.fromJson(Map json) => + _$$ProcessModelImplFromJson(json); - /// The parent model of the model. + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. @override - @JsonKey(name: 'parent_model', includeIfNull: false) - final String? parentModel; + @JsonKey(includeIfNull: false) + final String? model; - /// The format of the model. + /// Size of the model on disk. @override @JsonKey(includeIfNull: false) - final String? format; + final int? size; - /// The family of the model. + /// The model's digest. @override @JsonKey(includeIfNull: false) - final String? family; - - /// The families of the model. - final List? _families; + final String? digest; - /// The families of the model. + /// Details about a model. @override @JsonKey(includeIfNull: false) - List? get families { - final value = _families; - if (value == null) return null; - if (_families is EqualUnmodifiableListView) return _families; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + final ModelDetails? details; - /// The size of the model's parameters. + /// No Description @override - @JsonKey(name: 'parameter_size', includeIfNull: false) - final String? parameterSize; + @JsonKey(name: 'expires_at', includeIfNull: false) + final String? expiresAt; - /// The quantization level of the model. + /// Size of the model on disk. @override - @JsonKey(name: 'quantization_level', includeIfNull: false) - final String? quantizationLevel; + @JsonKey(name: 'size_vram', includeIfNull: false) + final int? 
sizeVram; @override String toString() { - return 'ModelDetails(parentModel: $parentModel, format: $format, family: $family, families: $families, parameterSize: $parameterSize, quantizationLevel: $quantizationLevel)'; + return 'ProcessModel(model: $model, size: $size, digest: $digest, details: $details, expiresAt: $expiresAt, sizeVram: $sizeVram)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModelDetailsImpl && - (identical(other.parentModel, parentModel) || - other.parentModel == parentModel) && - (identical(other.format, format) || other.format == format) && - (identical(other.family, family) || other.family == family) && - const DeepCollectionEquality().equals(other._families, _families) && - (identical(other.parameterSize, parameterSize) || - other.parameterSize == parameterSize) && - (identical(other.quantizationLevel, quantizationLevel) || - other.quantizationLevel == quantizationLevel)); + other is _$ProcessModelImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.size, size) || other.size == size) && + (identical(other.digest, digest) || other.digest == digest) && + (identical(other.details, details) || other.details == details) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.sizeVram, sizeVram) || + other.sizeVram == sizeVram)); } @JsonKey(ignore: true) @override int get hashCode => Object.hash( - runtimeType, - parentModel, - format, - family, - const DeepCollectionEquality().hash(_families), - parameterSize, - quantizationLevel); + runtimeType, model, size, digest, details, expiresAt, sizeVram); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => - __$$ModelDetailsImplCopyWithImpl<_$ModelDetailsImpl>(this, _$identity); + _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => + __$$ProcessModelImplCopyWithImpl<_$ProcessModelImpl>(this, _$identity); @override Map toJson() { - return _$$ModelDetailsImplToJson( + return _$$ProcessModelImplToJson( this, ); } } -abstract class _ModelDetails extends ModelDetails { - const factory _ModelDetails( - {@JsonKey(name: 'parent_model', includeIfNull: false) - final String? parentModel, - @JsonKey(includeIfNull: false) final String? format, - @JsonKey(includeIfNull: false) final String? family, - @JsonKey(includeIfNull: false) final List? families, - @JsonKey(name: 'parameter_size', includeIfNull: false) - final String? parameterSize, - @JsonKey(name: 'quantization_level', includeIfNull: false) - final String? quantizationLevel}) = _$ModelDetailsImpl; - const _ModelDetails._() : super._(); +abstract class _ProcessModel extends ProcessModel { + const factory _ProcessModel( + {@JsonKey(includeIfNull: false) final String? model, + @JsonKey(includeIfNull: false) final int? size, + @JsonKey(includeIfNull: false) final String? digest, + @JsonKey(includeIfNull: false) final ModelDetails? details, + @JsonKey(name: 'expires_at', includeIfNull: false) + final String? expiresAt, + @JsonKey(name: 'size_vram', includeIfNull: false) + final int? sizeVram}) = _$ProcessModelImpl; + const _ProcessModel._() : super._(); - factory _ModelDetails.fromJson(Map json) = - _$ModelDetailsImpl.fromJson; + factory _ProcessModel.fromJson(Map json) = + _$ProcessModelImpl.fromJson; @override - /// The parent model of the model. - @JsonKey(name: 'parent_model', includeIfNull: false) - String? 
get parentModel; + /// The model name. + /// + /// Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. + @JsonKey(includeIfNull: false) + String? get model; @override - /// The format of the model. + /// Size of the model on disk. @JsonKey(includeIfNull: false) - String? get format; + int? get size; @override - /// The family of the model. + /// The model's digest. @JsonKey(includeIfNull: false) - String? get family; + String? get digest; @override - /// The families of the model. + /// Details about a model. @JsonKey(includeIfNull: false) - List? get families; + ModelDetails? get details; @override - /// The size of the model's parameters. - @JsonKey(name: 'parameter_size', includeIfNull: false) - String? get parameterSize; + /// No Description + @JsonKey(name: 'expires_at', includeIfNull: false) + String? get expiresAt; @override - /// The quantization level of the model. - @JsonKey(name: 'quantization_level', includeIfNull: false) - String? get quantizationLevel; + /// Size of the model on disk. + @JsonKey(name: 'size_vram', includeIfNull: false) + int? get sizeVram; @override @JsonKey(ignore: true) - _$$ModelDetailsImplCopyWith<_$ModelDetailsImpl> get copyWith => + _$$ProcessModelImplCopyWith<_$ProcessModelImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5036,6 +6856,10 @@ mixin _$ModelInfo { @JsonKey(includeIfNull: false) ModelDetails? get details => throw _privateConstructorUsedError; + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? get modelInfo => throw _privateConstructorUsedError; + /// The default messages for the model. @JsonKey(includeIfNull: false) List? get messages => throw _privateConstructorUsedError; @@ -5058,9 +6882,12 @@ abstract class $ModelInfoCopyWith<$Res> { @JsonKey(includeIfNull: false) String? template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, @JsonKey(includeIfNull: false) List? messages}); $ModelDetailsCopyWith<$Res>? get details; + $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -5082,6 +6909,7 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> Object? template = freezed, Object? system = freezed, Object? details = freezed, + Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_value.copyWith( @@ -5109,6 +6937,10 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, + modelInfo: freezed == modelInfo + ? _value.modelInfo + : modelInfo // ignore: cast_nullable_to_non_nullable + as ModelInformation?, messages: freezed == messages ? _value.messages : messages // ignore: cast_nullable_to_non_nullable @@ -5127,6 +6959,18 @@ class _$ModelInfoCopyWithImpl<$Res, $Val extends ModelInfo> return _then(_value.copyWith(details: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $ModelInformationCopyWith<$Res>? get modelInfo { + if (_value.modelInfo == null) { + return null; + } + + return $ModelInformationCopyWith<$Res>(_value.modelInfo!, (value) { + return _then(_value.copyWith(modelInfo: value) as $Val); + }); + } } /// @nodoc @@ -5144,10 +6988,14 @@ abstract class _$$ModelInfoImplCopyWith<$Res> @JsonKey(includeIfNull: false) String? 
template, @JsonKey(includeIfNull: false) String? system, @JsonKey(includeIfNull: false) ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? modelInfo, @JsonKey(includeIfNull: false) List? messages}); @override $ModelDetailsCopyWith<$Res>? get details; + @override + $ModelInformationCopyWith<$Res>? get modelInfo; } /// @nodoc @@ -5167,6 +7015,7 @@ class __$$ModelInfoImplCopyWithImpl<$Res> Object? template = freezed, Object? system = freezed, Object? details = freezed, + Object? modelInfo = freezed, Object? messages = freezed, }) { return _then(_$ModelInfoImpl( @@ -5194,6 +7043,10 @@ class __$$ModelInfoImplCopyWithImpl<$Res> ? _value.details : details // ignore: cast_nullable_to_non_nullable as ModelDetails?, + modelInfo: freezed == modelInfo + ? _value.modelInfo + : modelInfo // ignore: cast_nullable_to_non_nullable + as ModelInformation?, messages: freezed == messages ? _value._messages : messages // ignore: cast_nullable_to_non_nullable @@ -5212,6 +7065,7 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) this.template, @JsonKey(includeIfNull: false) this.system, @JsonKey(includeIfNull: false) this.details, + @JsonKey(name: 'model_info', includeIfNull: false) this.modelInfo, @JsonKey(includeIfNull: false) final List? messages}) : _messages = messages, super._(); @@ -5249,6 +7103,11 @@ class _$ModelInfoImpl extends _ModelInfo { @JsonKey(includeIfNull: false) final ModelDetails? details; + /// Details about a model. + @override + @JsonKey(name: 'model_info', includeIfNull: false) + final ModelInformation? modelInfo; + /// The default messages for the model. final List? _messages; @@ -5265,7 +7124,7 @@ class _$ModelInfoImpl extends _ModelInfo { @override String toString() { - return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, messages: $messages)'; + return 'ModelInfo(license: $license, modelfile: $modelfile, parameters: $parameters, template: $template, system: $system, details: $details, modelInfo: $modelInfo, messages: $messages)'; } @override @@ -5282,6 +7141,8 @@ class _$ModelInfoImpl extends _ModelInfo { other.template == template) && (identical(other.system, system) || other.system == system) && (identical(other.details, details) || other.details == details) && + (identical(other.modelInfo, modelInfo) || + other.modelInfo == modelInfo) && const DeepCollectionEquality().equals(other._messages, _messages)); } @@ -5295,6 +7156,7 @@ class _$ModelInfoImpl extends _ModelInfo { template, system, details, + modelInfo, const DeepCollectionEquality().hash(_messages)); @JsonKey(ignore: true) @@ -5319,6 +7181,8 @@ abstract class _ModelInfo extends ModelInfo { @JsonKey(includeIfNull: false) final String? template, @JsonKey(includeIfNull: false) final String? system, @JsonKey(includeIfNull: false) final ModelDetails? details, + @JsonKey(name: 'model_info', includeIfNull: false) + final ModelInformation? modelInfo, @JsonKey(includeIfNull: false) final List? messages}) = _$ModelInfoImpl; const _ModelInfo._() : super._(); @@ -5358,6 +7222,11 @@ abstract class _ModelInfo extends ModelInfo { ModelDetails? get details; @override + /// Details about a model. + @JsonKey(name: 'model_info', includeIfNull: false) + ModelInformation? get modelInfo; + @override + /// The default messages for the model. @JsonKey(includeIfNull: false) List? 
get messages; @@ -6491,9 +8360,8 @@ PushModelResponse _$PushModelResponseFromJson(Map json) { /// @nodoc mixin _$PushModelResponse { /// Status pushing the model. - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? get status => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get status => throw _privateConstructorUsedError; /// the model's digest @JsonKey(includeIfNull: false) @@ -6520,10 +8388,7 @@ abstract class $PushModelResponseCopyWith<$Res> { _$PushModelResponseCopyWithImpl<$Res, PushModelResponse>; @useResult $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? status, + {@JsonKey(includeIfNull: false) String? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -6551,7 +8416,7 @@ class _$PushModelResponseCopyWithImpl<$Res, $Val extends PushModelResponse> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as PushModelStatus?, + as String?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -6577,10 +8442,7 @@ abstract class _$$PushModelResponseImplCopyWith<$Res> @override @useResult $Res call( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? status, + {@JsonKey(includeIfNull: false) String? status, @JsonKey(includeIfNull: false) String? digest, @JsonKey(includeIfNull: false) int? total, @JsonKey(includeIfNull: false) int? completed}); @@ -6606,7 +8468,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> status: freezed == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as PushModelStatus?, + as String?, digest: freezed == digest ? _value.digest : digest // ignore: cast_nullable_to_non_nullable @@ -6627,10 +8489,7 @@ class __$$PushModelResponseImplCopyWithImpl<$Res> @JsonSerializable() class _$PushModelResponseImpl extends _PushModelResponse { const _$PushModelResponseImpl( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - this.status, + {@JsonKey(includeIfNull: false) this.status, @JsonKey(includeIfNull: false) this.digest, @JsonKey(includeIfNull: false) this.total, @JsonKey(includeIfNull: false) this.completed}) @@ -6641,9 +8500,8 @@ class _$PushModelResponseImpl extends _PushModelResponse { /// Status pushing the model. @override - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final PushModelStatus? status; + @JsonKey(includeIfNull: false) + final String? status; /// the model's digest @override @@ -6699,10 +8557,7 @@ class _$PushModelResponseImpl extends _PushModelResponse { abstract class _PushModelResponse extends PushModelResponse { const factory _PushModelResponse( - {@JsonKey( - includeIfNull: false, - unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - final PushModelStatus? status, + {@JsonKey(includeIfNull: false) final String? status, @JsonKey(includeIfNull: false) final String? digest, @JsonKey(includeIfNull: false) final int? total, @JsonKey(includeIfNull: false) final int? completed}) = @@ -6715,9 +8570,8 @@ abstract class _PushModelResponse extends PushModelResponse { @override /// Status pushing the model. - @JsonKey( - includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) - PushModelStatus? 
get status; + @JsonKey(includeIfNull: false) + String? get status; @override /// the model's digest diff --git a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart index f5548646..473e7825 100644 --- a/packages/ollama_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/ollama_dart/lib/src/generated/schema/schema.g.dart @@ -13,6 +13,7 @@ _$GenerateCompletionRequestImpl _$$GenerateCompletionRequestImplFromJson( _$GenerateCompletionRequestImpl( model: json['model'] as String, prompt: json['prompt'] as String, + suffix: json['suffix'] as String?, images: (json['images'] as List?)?.map((e) => e as String).toList(), system: json['system'] as String?, @@ -42,6 +43,7 @@ Map _$$GenerateCompletionRequestImplToJson( } } + writeNotNull('suffix', instance.suffix); writeNotNull('images', instance.images); writeNotNull('system', instance.system); writeNotNull('template', instance.template); @@ -65,6 +67,7 @@ _$RequestOptionsImpl _$$RequestOptionsImplFromJson(Map json) => numPredict: json['num_predict'] as int?, topK: json['top_k'] as int?, topP: (json['top_p'] as num?)?.toDouble(), + minP: (json['min_p'] as num?)?.toDouble(), tfsZ: (json['tfs_z'] as num?)?.toDouble(), typicalP: (json['typical_p'] as num?)?.toDouble(), repeatLastN: json['repeat_last_n'] as int?, @@ -106,6 +109,7 @@ Map _$$RequestOptionsImplToJson( writeNotNull('num_predict', instance.numPredict); writeNotNull('top_k', instance.topK); writeNotNull('top_p', instance.topP); + writeNotNull('min_p', instance.minP); writeNotNull('tfs_z', instance.tfsZ); writeNotNull('typical_p', instance.typicalP); writeNotNull('repeat_last_n', instance.repeatLastN); @@ -133,6 +137,26 @@ Map _$$RequestOptionsImplToJson( return val; } +_$VersionResponseImpl _$$VersionResponseImplFromJson( + Map json) => + _$VersionResponseImpl( + version: json['version'] as String?, + ); + +Map _$$VersionResponseImplToJson( + _$VersionResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('version', instance.version); + return val; +} + _$GenerateCompletionResponseImpl _$$GenerateCompletionResponseImplFromJson( Map json) => _$GenerateCompletionResponseImpl( @@ -189,6 +213,9 @@ _$GenerateChatCompletionRequestImpl json['options'] as Map), stream: json['stream'] as bool? ?? false, keepAlive: json['keep_alive'] as int?, + tools: (json['tools'] as List?) + ?.map((e) => Tool.fromJson(e as Map)) + .toList(), ); Map _$$GenerateChatCompletionRequestImplToJson( @@ -208,18 +235,17 @@ Map _$$GenerateChatCompletionRequestImplToJson( writeNotNull('options', instance.options?.toJson()); val['stream'] = instance.stream; writeNotNull('keep_alive', instance.keepAlive); + writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); return val; } _$GenerateChatCompletionResponseImpl _$$GenerateChatCompletionResponseImplFromJson(Map json) => _$GenerateChatCompletionResponseImpl( - message: json['message'] == null - ? 
null - : Message.fromJson(json['message'] as Map), - model: json['model'] as String?, - createdAt: json['created_at'] as String?, - done: json['done'] as bool?, + message: Message.fromJson(json['message'] as Map), + model: json['model'] as String, + createdAt: json['created_at'] as String, + done: json['done'] as bool, doneReason: $enumDecodeNullable( _$DoneReasonEnumMap, json['done_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -233,7 +259,12 @@ _$GenerateChatCompletionResponseImpl Map _$$GenerateChatCompletionResponseImplToJson( _$GenerateChatCompletionResponseImpl instance) { - final val = {}; + final val = { + 'message': instance.message.toJson(), + 'model': instance.model, + 'created_at': instance.createdAt, + 'done': instance.done, + }; void writeNotNull(String key, dynamic value) { if (value != null) { @@ -241,10 +272,6 @@ Map _$$GenerateChatCompletionResponseImplToJson( } } - writeNotNull('message', instance.message?.toJson()); - writeNotNull('model', instance.model); - writeNotNull('created_at', instance.createdAt); - writeNotNull('done', instance.done); writeNotNull('done_reason', _$DoneReasonEnumMap[instance.doneReason]); writeNotNull('total_duration', instance.totalDuration); writeNotNull('load_duration', instance.loadDuration); @@ -267,6 +294,9 @@ _$MessageImpl _$$MessageImplFromJson(Map json) => content: json['content'] as String, images: (json['images'] as List?)?.map((e) => e as String).toList(), + toolCalls: (json['tool_calls'] as List?) + ?.map((e) => ToolCall.fromJson(e as Map)) + .toList(), ); Map _$$MessageImplToJson(_$MessageImpl instance) { @@ -282,6 +312,8 @@ Map _$$MessageImplToJson(_$MessageImpl instance) { } writeNotNull('images', instance.images); + writeNotNull( + 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); return val; } @@ -289,8 +321,84 @@ const _$MessageRoleEnumMap = { MessageRole.system: 'system', MessageRole.user: 'user', MessageRole.assistant: 'assistant', + MessageRole.tool: 'tool', }; +_$ToolImpl _$$ToolImplFromJson(Map json) => _$ToolImpl( + type: $enumDecodeNullable(_$ToolTypeEnumMap, json['type']) ?? + ToolType.function, + function: json['function'] == null + ? null + : ToolFunction.fromJson(json['function'] as Map), + ); + +Map _$$ToolImplToJson(_$ToolImpl instance) { + final val = { + 'type': _$ToolTypeEnumMap[instance.type]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('function', instance.function?.toJson()); + return val; +} + +const _$ToolTypeEnumMap = { + ToolType.function: 'function', +}; + +_$ToolFunctionImpl _$$ToolFunctionImplFromJson(Map json) => + _$ToolFunctionImpl( + name: json['name'] as String, + description: json['description'] as String, + parameters: json['parameters'] as Map, + ); + +Map _$$ToolFunctionImplToJson(_$ToolFunctionImpl instance) => + { + 'name': instance.name, + 'description': instance.description, + 'parameters': instance.parameters, + }; + +_$ToolCallImpl _$$ToolCallImplFromJson(Map json) => + _$ToolCallImpl( + function: json['function'] == null + ? 
null + : ToolCallFunction.fromJson(json['function'] as Map), + ); + +Map _$$ToolCallImplToJson(_$ToolCallImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('function', instance.function?.toJson()); + return val; +} + +_$ToolCallFunctionImpl _$$ToolCallFunctionImplFromJson( + Map json) => + _$ToolCallFunctionImpl( + name: json['name'] as String, + arguments: json['arguments'] as Map, + ); + +Map _$$ToolCallFunctionImplToJson( + _$ToolCallFunctionImpl instance) => + { + 'name': instance.name, + 'arguments': instance.arguments, + }; + _$GenerateEmbeddingRequestImpl _$$GenerateEmbeddingRequestImplFromJson( Map json) => _$GenerateEmbeddingRequestImpl( @@ -476,6 +584,85 @@ Map _$$ModelDetailsImplToJson(_$ModelDetailsImpl instance) { return val; } +_$ModelInformationImpl _$$ModelInformationImplFromJson( + Map json) => + _$ModelInformationImpl( + generalArchitecture: json['general.architecture'] as String?, + generalFileType: json['general.file_type'] as int?, + generalParameterCount: json['general.parameter_count'] as int?, + generalQuantizationVersion: json['general.quantization_version'] as int?, + ); + +Map _$$ModelInformationImplToJson( + _$ModelInformationImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('general.architecture', instance.generalArchitecture); + writeNotNull('general.file_type', instance.generalFileType); + writeNotNull('general.parameter_count', instance.generalParameterCount); + writeNotNull( + 'general.quantization_version', instance.generalQuantizationVersion); + return val; +} + +_$ProcessResponseImpl _$$ProcessResponseImplFromJson( + Map json) => + _$ProcessResponseImpl( + models: (json['models'] as List?) + ?.map((e) => ProcessModel.fromJson(e as Map)) + .toList(), + ); + +Map _$$ProcessResponseImplToJson( + _$ProcessResponseImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('models', instance.models?.map((e) => e.toJson()).toList()); + return val; +} + +_$ProcessModelImpl _$$ProcessModelImplFromJson(Map json) => + _$ProcessModelImpl( + model: json['model'] as String?, + size: json['size'] as int?, + digest: json['digest'] as String?, + details: json['details'] == null + ? null + : ModelDetails.fromJson(json['details'] as Map), + expiresAt: json['expires_at'] as String?, + sizeVram: json['size_vram'] as int?, + ); + +Map _$$ProcessModelImplToJson(_$ProcessModelImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('model', instance.model); + writeNotNull('size', instance.size); + writeNotNull('digest', instance.digest); + writeNotNull('details', instance.details?.toJson()); + writeNotNull('expires_at', instance.expiresAt); + writeNotNull('size_vram', instance.sizeVram); + return val; +} + _$ModelInfoRequestImpl _$$ModelInfoRequestImplFromJson( Map json) => _$ModelInfoRequestImpl( @@ -498,6 +685,10 @@ _$ModelInfoImpl _$$ModelInfoImplFromJson(Map json) => details: json['details'] == null ? null : ModelDetails.fromJson(json['details'] as Map), + modelInfo: json['model_info'] == null + ? null + : ModelInformation.fromJson( + json['model_info'] as Map), messages: (json['messages'] as List?) 
?.map((e) => Message.fromJson(e as Map)) .toList(), @@ -518,6 +709,7 @@ Map _$$ModelInfoImplToJson(_$ModelInfoImpl instance) { writeNotNull('template', instance.template); writeNotNull('system', instance.system); writeNotNull('details', instance.details?.toJson()); + writeNotNull('model_info', instance.modelInfo?.toJson()); writeNotNull('messages', instance.messages?.map((e) => e.toJson()).toList()); return val; } @@ -645,8 +837,7 @@ Map _$$PushModelRequestImplToJson( _$PushModelResponseImpl _$$PushModelResponseImplFromJson( Map json) => _$PushModelResponseImpl( - status: $enumDecodeNullable(_$PushModelStatusEnumMap, json['status'], - unknownValue: JsonKey.nullForUndefinedEnumValue), + status: json['status'] as String?, digest: json['digest'] as String?, total: json['total'] as int?, completed: json['completed'] as int?, @@ -662,16 +853,9 @@ Map _$$PushModelResponseImplToJson( } } - writeNotNull('status', _$PushModelStatusEnumMap[instance.status]); + writeNotNull('status', instance.status); writeNotNull('digest', instance.digest); writeNotNull('total', instance.total); writeNotNull('completed', instance.completed); return val; } - -const _$PushModelStatusEnumMap = { - PushModelStatus.retrievingManifest: 'retrieving manifest', - PushModelStatus.startingUpload: 'starting upload', - PushModelStatus.pushingManifest: 'pushing manifest', - PushModelStatus.success: 'success', -}; diff --git a/packages/ollama_dart/lib/src/generated/schema/tool.dart b/packages/ollama_dart/lib/src/generated/schema/tool.dart new file mode 100644 index 00000000..4a225d1a --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool.dart @@ -0,0 +1,53 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: Tool +// ========================================== + +/// A tool the model may call. +@freezed +class Tool with _$Tool { + const Tool._(); + + /// Factory constructor for Tool + const factory Tool({ + /// The type of tool. + @Default(ToolType.function) ToolType type, + + /// A function that the model may call. + @JsonKey(includeIfNull: false) ToolFunction? function, + }) = _Tool; + + /// Object construction from a JSON representation + factory Tool.fromJson(Map json) => _$ToolFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'function']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'function': function, + }; + } +} + +// ========================================== +// ENUM: ToolType +// ========================================== + +/// The type of tool. +enum ToolType { + @JsonValue('function') + function, +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart new file mode 100644 index 00000000..ec1d82e0 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolCall +// ========================================== + +/// The tool the model wants to call. 
+@freezed +class ToolCall with _$ToolCall { + const ToolCall._(); + + /// Factory constructor for ToolCall + const factory ToolCall({ + /// The function the model wants to call. + @JsonKey(includeIfNull: false) ToolCallFunction? function, + }) = _ToolCall; + + /// Object construction from a JSON representation + factory ToolCall.fromJson(Map json) => + _$ToolCallFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['function']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'function': function, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart new file mode 100644 index 00000000..4d5e969c --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call_function.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolCallFunction +// ========================================== + +/// The function the model wants to call. +@freezed +class ToolCallFunction with _$ToolCallFunction { + const ToolCallFunction._(); + + /// Factory constructor for ToolCallFunction + const factory ToolCallFunction({ + /// The name of the function to be called. + required String name, + + /// The arguments to pass to the function. + required ToolCallFunctionArgs arguments, + }) = _ToolCallFunction; + + /// Object construction from a JSON representation + factory ToolCallFunction.fromJson(Map json) => + _$ToolCallFunctionFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['name', 'arguments']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'arguments': arguments, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart new file mode 100644 index 00000000..a1d7d7b8 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_call_function_args.dart @@ -0,0 +1,12 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// TYPE: ToolCallFunctionArgs +// ========================================== + +/// The arguments to pass to the function. +typedef ToolCallFunctionArgs = Map; diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart new file mode 100644 index 00000000..35d5e8f1 --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_function.dart @@ -0,0 +1,52 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: ToolFunction +// ========================================== + +/// A function that the model may call. 
+@freezed +class ToolFunction with _$ToolFunction { + const ToolFunction._(); + + /// Factory constructor for ToolFunction + const factory ToolFunction({ + /// The name of the function to be called. + required String name, + + /// A description of what the function does, used by the model to choose when and how to call the function. + required String description, + + /// The parameters the functions accepts, described as a JSON Schema object. + required ToolFunctionParams parameters, + }) = _ToolFunction; + + /// Object construction from a JSON representation + factory ToolFunction.fromJson(Map json) => + _$ToolFunctionFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'parameters' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'parameters': parameters, + }; + } +} diff --git a/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart new file mode 100644 index 00000000..89fa74fb --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/tool_function_params.dart @@ -0,0 +1,12 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// TYPE: ToolFunctionParams +// ========================================== + +/// The parameters the functions accepts, described as a JSON Schema object. +typedef ToolFunctionParams = Map; diff --git a/packages/ollama_dart/lib/src/generated/schema/version_response.dart b/packages/ollama_dart/lib/src/generated/schema/version_response.dart new file mode 100644 index 00000000..21d3259e --- /dev/null +++ b/packages/ollama_dart/lib/src/generated/schema/version_response.dart @@ -0,0 +1,40 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of ollama_schema; + +// ========================================== +// CLASS: VersionResponse +// ========================================== + +/// The response class for the version endpoint. +@freezed +class VersionResponse with _$VersionResponse { + const VersionResponse._(); + + /// Factory constructor for VersionResponse + const factory VersionResponse({ + /// The version of the Ollama server. + @JsonKey(includeIfNull: false) String? version, + }) = _VersionResponse; + + /// Object construction from a JSON representation + factory VersionResponse.fromJson(Map json) => + _$VersionResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['version']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'version': version, + }; + } +} diff --git a/packages/ollama_dart/lib/src/http_client/http_client.dart b/packages/ollama_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/ollama_dart/lib/src/http_client/http_client.dart +++ b/packages/ollama_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/ollama_dart/oas/ollama-curated.yaml b/packages/ollama_dart/oas/ollama-curated.yaml index b63d0c21..05b3f593 100644 --- a/packages/ollama_dart/oas/ollama-curated.yaml +++ b/packages/ollama_dart/oas/ollama-curated.yaml @@ -20,6 +20,18 @@ tags: description: List and describe the various models available. paths: + /version: + get: + operationId: getVersion + summary: Returns the version of the Ollama server. + description: This endpoint returns the version of the Ollama server. + responses: + '200': + description: Successful operation. + content: + application/json: + schema: + $ref: '#/components/schemas/VersionResponse' /generate: post: operationId: generateCompletion @@ -109,6 +121,19 @@ paths: application/json: schema: $ref: '#/components/schemas/ModelsResponse' + /ps: + get: + operationId: listRunningModels + tags: + - Models + summary: List models that are running. + responses: + '200': + description: Successful operation. + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessResponse' /show: post: operationId: showModelInfo @@ -171,7 +196,7 @@ paths: '200': description: Successful operation. content: - application/json: + application/x-ndjson: schema: $ref: '#/components/schemas/PullModelResponse' /push: @@ -190,7 +215,7 @@ paths: '200': description: Successful operation. content: - application/json: + application/x-ndjson: schema: $ref: '#/components/schemas/PushModelResponse' /blobs/{digest}: @@ -198,10 +223,11 @@ paths: operationId: checkBlob tags: - Models - summary: Check to see if a blob exists on the Ollama server which is useful when creating models. + summary: Ensures that the file blob used for a FROM or ADAPTER field exists on the server. + description: This is checking your Ollama server and not Ollama.ai. parameters: - - in: query - name: name + - in: path + name: digest schema: type: string required: true @@ -218,8 +244,8 @@ paths: - Models summary: Create a blob from a file. Returns the server file path. parameters: - - in: query - name: name + - in: path + name: digest schema: type: string required: true @@ -247,11 +273,14 @@ components: The model name. Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version. - example: llama3:8b + example: llama3.2 prompt: type: string description: The prompt to generate a response. example: Why is the sky blue? + suffix: + type: string + description: The text that comes after the inserted text. 
images: type: array description: (optional) a list of Base64-encoded images to include in the message (for multimodal models such as llava) @@ -286,10 +315,10 @@ components: description: &stream | If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects. default: false - keep_alive: + keep_alive: &keep_alive type: integer nullable: true - description: &keep_alive | + description: | How long (in minutes) to keep the model loaded in memory. - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration. @@ -312,90 +341,115 @@ components: type: integer nullable: true description: | - Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + Sets the random number seed to use for generation. Setting this to a specific number will make the model + generate the same text for the same prompt. (Default: 0) num_predict: type: integer nullable: true description: | - Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + Maximum number of tokens to predict when generating text. + (Default: 128, -1 = infinite generation, -2 = fill context) top_k: type: integer nullable: true description: | - Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: type: number format: float nullable: true description: | - Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + Works together with top_k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + min_p: + type: number + format: float + nullable: true + description: | + Alternative to the top_p, and aims to ensure a balance of quality and variety. min_p represents the minimum + probability for a token to be considered, relative to the probability of the most likely token. For + example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less + than 0.05*0.9=0.045 are filtered out. (Default: 0.0) tfs_z: type: number format: float nullable: true description: | - Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) typical_p: type: number format: float nullable: true description: | - Typical p is used to reduce the impact of less probable tokens from the output. + Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) repeat_last_n: type: integer nullable: true description: | - Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx) + Sets how far back for the model to look back to prevent repetition. + (Default: 64, 0 = disabled, -1 = num_ctx) temperature: type: number format: float nullable: true description: | - The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + The temperature of the model. Increasing the temperature will make the model answer more creatively. + (Default: 0.8) repeat_penalty: type: number format: float nullable: true description: | - Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) presence_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + Positive values penalize new tokens based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. (Default: 0) frequency_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. (Default: 0) mirostat: type: integer nullable: true description: | - Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + Enable Mirostat sampling for controlling perplexity. + (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) mirostat_tau: type: number format: float nullable: true description: | - Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + Controls the balance between coherence and diversity of the output. A lower value will result in more + focused and coherent text. (Default: 5.0) mirostat_eta: type: number format: float nullable: true description: | - Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + (Default: 0.1) penalize_newline: type: boolean nullable: true description: | - Penalize newlines in the output. (Default: false) + Penalize newlines in the output. (Default: true) stop: type: array nullable: true - description: Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + description: | + Sequences where the API will stop generating further tokens. The returned text will not contain the stop + sequence. items: type: string numa: @@ -407,17 +461,18 @@ components: type: integer nullable: true description: | - Sets the size of the context window used to generate the next token. 
+ Sets the size of the context window used to generate the next token. (Default: 2048) num_batch: type: integer nullable: true description: | - Sets the number of batches to use for generation. (Default: 1) + Sets the number of batches to use for generation. (Default: 512) num_gpu: type: integer nullable: true description: | - The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + The number of layers to send to the GPU(s). + On macOS it defaults to 1 to enable metal support, 0 to disable. main_gpu: type: integer nullable: true @@ -432,7 +487,7 @@ components: type: boolean nullable: true description: | - Enable f16 key/value. (Default: false) + Enable f16 key/value. (Default: true) logits_all: type: boolean nullable: true @@ -457,7 +512,9 @@ components: type: integer nullable: true description: | - Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + performance. It is recommended to set this value to the number of physical CPU cores your system has + (as opposed to the logical number of cores). ResponseFormat: type: string description: | @@ -468,6 +525,13 @@ components: Note: it's important to instruct the model to use JSON in the prompt. Otherwise, the model may generate large amounts whitespace. enum: - json + VersionResponse: + type: object + description: The response class for the version endpoint. + properties: + version: + type: string + description: The version of the Ollama server. GenerateCompletionResponse: type: object description: The response class for the generate endpoint. @@ -475,7 +539,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 created_at: type: string format: date-time @@ -532,7 +596,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 messages: type: array description: The messages of the chat, this can be used to keep a chat memory @@ -546,10 +610,12 @@ components: type: boolean description: *stream default: false - keep_alive: - type: integer - nullable: true - description: *keep_alive + keep_alive: *keep_alive + tools: + type: array + description: A list of tools the model may call. + items: + $ref: '#/components/schemas/Tool' required: - model - messages @@ -562,7 +628,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 created_at: type: string format: date-time @@ -602,6 +668,11 @@ components: format: int64 description: Time in nanoseconds spent generating the response. example: 1325948000 + required: + - model + - created_at + - message + - done DoneReason: type: string description: Reason why the model is done generating a response. 
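The hunks above thread a new `tools` array through `GenerateChatCompletionRequest` and mark `model`, `created_at`, `message` and `done` as required on the chat response, so the regenerated Dart client exposes them as non-nullable fields. A minimal usage sketch of that client (the model tag is only an example; the full tool-calling flow is exercised in the test changes further down):

```dart
import 'package:ollama_dart/ollama_dart.dart';

Future<void> main() async {
  final client = OllamaClient();
  final res = await client.generateChatCompletion(
    request: const GenerateChatCompletionRequest(
      model: 'llama3.2', // example model tag; any pulled chat model works
      messages: [
        Message(role: MessageRole.user, content: 'Why is the sky blue?'),
      ],
    ),
  );
  // After this change, message, model, createdAt and done are non-nullable.
  print('${res.model}: ${res.message.content}');
  client.endSession();
}
```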
@@ -616,7 +687,7 @@ components: role: type: string description: The role of the message - enum: [ "system", "user", "assistant" ] + enum: [ "system", "user", "assistant", "tool" ] content: type: string description: The content of the message @@ -628,9 +699,69 @@ components: type: string description: Base64-encoded image (for multimodal models such as llava) example: iVBORw0KGgoAAAANSUhEUgAAAAkAAAANCAIAAAD0YtNRAAAABnRSTlMA/AD+APzoM1ogAAAAWklEQVR4AWP48+8PLkR7uUdzcMvtU8EhdykHKAciEXL3pvw5FQIURaBDJkARoDhY3zEXiCgCHbNBmAlUiyaBkENoxZSDWnOtBmoAQu7TnT+3WuDOA7KBIkAGAGwiNeqjusp/AAAAAElFTkSuQmCC + tool_calls: + type: array + description: A list of tools the model wants to call. + items: + $ref: '#/components/schemas/ToolCall' required: - role - content + Tool: + type: object + description: A tool the model may call. + properties: + type: + type: string + enum: + - function + default: function + description: The type of tool. + function: + $ref: '#/components/schemas/ToolFunction' + ToolFunction: + type: object + description: A function that the model may call. + properties: + name: + type: string + description: The name of the function to be called. + description: + type: string + description: | + A description of what the function does, used by the model to choose when and how to call the function. + parameters: + $ref: '#/components/schemas/ToolFunctionParams' + required: + - name + - description + - parameters + ToolFunctionParams: + type: object + description: The parameters the functions accepts, described as a JSON Schema object. + additionalProperties: true + ToolCall: + type: object + description: The tool the model wants to call. + properties: + function: + $ref: '#/components/schemas/ToolCallFunction' + ToolCallFunction: + type: object + description: The function the model wants to call. + properties: + name: + type: string + description: The name of the function to be called. + arguments: + $ref: '#/components/schemas/ToolCallFunctionArgs' + required: + - name + - arguments + ToolCallFunctionArgs: + type: object + description: The arguments to pass to the function. + additionalProperties: true GenerateEmbeddingRequest: description: Generate embeddings from a model. type: object @@ -638,17 +769,14 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 prompt: type: string description: Text to generate embeddings for. example: 'Here is an article about llamas...' options: $ref: '#/components/schemas/RequestOptions' - keep_alive: - type: integer - nullable: true - description: *keep_alive + keep_alive: *keep_alive required: - model - prompt @@ -718,7 +846,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 modified_at: type: string format: date-time @@ -759,6 +887,63 @@ components: quantization_level: type: string description: The quantization level of the model. + ModelInformation: + type: object + description: Details about a model. + properties: + general.architecture: + type: string + description: The architecture of the model. + general.file_type: + type: integer + nullable: true + description: The file type of the model. + general.parameter_count: + type: integer + format: int64 + nullable: true + description: The number of parameters in the model. + general.quantization_version: + type: integer + nullable: true + description: The number of parameters in the model. + ProcessResponse: + type: object + description: Response class for the list running models endpoint. 
+ properties: + models: + type: array + description: List of running models. + items: + $ref: '#/components/schemas/ProcessModel' + ProcessModel: + type: object + description: A model that is currently loaded. + properties: + model: + type: string + description: *model_name + example: llama3.2 + size: + type: integer + format: int64 + description: Size of the model on disk. + example: 7323310500 + digest: + type: string + description: The model's digest. + example: 'sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711a' + details: + $ref: '#/components/schemas/ModelDetails' + expires_at: + type: string + format: date-time + example: 2023-08-02T17:02:23.713454393-07:00 + size_vram: + type: integer + format: int64 + description: Size of the model on disk. + example: 7323310500 ModelInfoRequest: description: Request class for the show model info endpoint. type: object @@ -766,7 +951,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 required: - model ModelInfo: @@ -799,12 +984,14 @@ components: description: The system prompt for the model. details: $ref: '#/components/schemas/ModelDetails' + model_info: + $ref: '#/components/schemas/ModelInformation' messages: type: array nullable: true description: The default messages for the model. items: - $ref: '#/components/schemas/Message' + $ref: '#/components/schemas/Message' CopyModelRequest: description: Request class for copying a model. type: object @@ -812,7 +999,7 @@ components: source: type: string description: Name of the model to copy. - example: llama3:8b + example: llama3.2 destination: type: string description: Name of the new model. @@ -837,7 +1024,7 @@ components: model: type: string description: *model_name - example: llama3:8b + example: llama3.2 insecure: type: boolean description: | @@ -925,7 +1112,8 @@ components: description: Response class for pushing a model. properties: status: - $ref: '#/components/schemas/PushModelStatus' + type: string + description: Status pushing the model. digest: type: string description: the model's digest @@ -940,11 +1128,3 @@ components: format: int64 description: Total bytes transferred. example: 2142590208 - PushModelStatus: - type: string - description: Status pushing the model. - enum: - - retrieving manifest - - starting upload - - pushing manifest - - success diff --git a/packages/ollama_dart/pubspec.yaml b/packages/ollama_dart/pubspec.yaml index 30f792f5..52b3b896 100644 --- a/packages/ollama_dart/pubspec.yaml +++ b/packages/ollama_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: ollama_dart -description: Dart Client for the Ollama API (run Llama 3, Phi-3, WizardLM-2, Mistral 7B, Gemma and other models locally). -version: 0.1.0 +description: Dart Client for the Ollama API (run Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, Qwen2 and other models locally). 
+version: 0.2.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/ollama_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:ollama_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -13,22 +13,22 @@ topics: - ollama environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/ollama_dart/test/ollama_dart_chat_test.dart b/packages/ollama_dart/test/ollama_dart_chat_test.dart index af90c448..3e8afd82 100644 --- a/packages/ollama_dart/test/ollama_dart_chat_test.dart +++ b/packages/ollama_dart/test/ollama_dart_chat_test.dart @@ -7,19 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; - const visionModel = 'llava:latest'; + const defaultModel = 'llama3.2'; + const visionModel = 'llava'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); expect( - res.models?.firstWhere((final m) => m.model == visionModel), + res.models?.firstWhere((final m) => m.model!.startsWith(visionModel)), isNotNull, ); }); @@ -48,7 +48,7 @@ void main() { expect(response.model, defaultModel); expect(response.createdAt, isNotNull); expect( - response.message?.content, + response.message.content, isNotEmpty, ); expect(response.done, isTrue); @@ -79,7 +79,7 @@ void main() { ); String text = ''; await for (final res in stream) { - text += (res.message?.content ?? 
'').trim(); + text += res.message.content.trim(); } expect(text, contains('123456789')); }); @@ -103,7 +103,7 @@ void main() { format: ResponseFormat.json, ), ); - final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('[1,2,3,4,5,6,7,8,9]')); }); @@ -125,7 +125,7 @@ void main() { options: RequestOptions(stop: ['4']), ), ); - final generation = res.message?.content.replaceAll(RegExp(r'[\s\n]'), ''); + final generation = res.message.content.replaceAll(RegExp(r'[\s\n]'), ''); expect(generation, contains('123')); expect(generation, isNot(contains('456789'))); expect(res.doneReason, DoneReason.stop); @@ -170,8 +170,65 @@ void main() { ); final res1 = await client.generateChatCompletion(request: request); - final text1 = res1.message?.content; + final text1 = res1.message.content; expect(text1, contains('star')); }); + + test('Test tool calling', () async { + const tool = Tool( + function: ToolFunction( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and country, e.g. San Francisco, US', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ), + ); + + final res = await client.generateChatCompletion( + request: const GenerateChatCompletionRequest( + model: defaultModel, + messages: [ + Message( + role: MessageRole.system, + content: 'You are a helpful assistant.', + ), + Message( + role: MessageRole.user, + content: + 'What’s the weather like in Boston and Barcelona in celsius?', + ), + ], + tools: [tool], + keepAlive: 1, + ), + ); + // https://github.com/ollama/ollama/issues/5796 + expect(res.doneReason, DoneReason.stop); + expect(res.message.role, MessageRole.assistant); + expect(res.message.content, isEmpty); + final toolCalls = res.message.toolCalls; + expect(toolCalls, hasLength(2)); + final toolCall1 = toolCalls?.first.function; + expect(toolCall1?.name, tool.function?.name); + expect(toolCall1?.arguments['location'], contains('Boston')); + expect(toolCall1?.arguments['unit'], 'celsius'); + final toolCall2 = toolCalls?.last.function; + expect(toolCall2?.name, tool.function?.name); + expect(toolCall2?.arguments['location'], contains('Barcelona')); + expect(toolCall2?.arguments['unit'], 'celsius'); + }); }); } diff --git a/packages/ollama_dart/test/ollama_dart_completions_test.dart b/packages/ollama_dart/test/ollama_dart_completions_test.dart index 5c4b2981..5a134b37 100644 --- a/packages/ollama_dart/test/ollama_dart_completions_test.dart +++ b/packages/ollama_dart/test/ollama_dart_completions_test.dart @@ -7,20 +7,19 @@ void main() { group('Ollama Generate Completions API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; - const visionModel = 'llava:latest'; + const defaultModel = 'gemma2'; + const visionModel = 'llava'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); - expect( - res.models?.firstWhere((final m) => m.model == visionModel), + res.models?.firstWhere((final m) => 
m.model!.startsWith(visionModel)), isNotNull, ); }); @@ -76,9 +75,9 @@ void main() { }); test('Test call completions API with raw mode', () async { - const testPrompt = '[INST] List the numbers from 1 to 9 in order. ' + const testPrompt = 'List the numbers from 1 to 9 in order. ' 'Output ONLY the numbers in one line without any spaces or commas. ' - 'NUMBERS: [/INST]'; + 'NUMBERS:'; final res = await client.generateCompletion( request: const GenerateCompletionRequest( diff --git a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart index c32701a8..e6ff8b6f 100644 --- a/packages/ollama_dart/test/ollama_dart_embeddings_test.dart +++ b/packages/ollama_dart/test/ollama_dart_embeddings_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Generate Embeddings API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; + const defaultModel = 'mxbai-embed-large:335m'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); }); diff --git a/packages/ollama_dart/test/ollama_dart_models_test.dart b/packages/ollama_dart/test/ollama_dart_models_test.dart index f77a9d32..03086e4b 100644 --- a/packages/ollama_dart/test/ollama_dart_models_test.dart +++ b/packages/ollama_dart/test/ollama_dart_models_test.dart @@ -7,14 +7,14 @@ void main() { group('Ollama Models API tests', skip: Platform.environment.containsKey('CI'), () { late OllamaClient client; - const defaultModel = 'llama3:latest'; + const defaultModel = 'gemma2'; setUp(() async { client = OllamaClient(); // Check that the model exists final res = await client.listModels(); expect( - res.models?.firstWhere((final m) => m.model == defaultModel), + res.models?.firstWhere((final m) => m.model!.startsWith(defaultModel)), isNotNull, ); }); @@ -62,7 +62,26 @@ void main() { test('Test list models', () async { final res = await client.listModels(); - expect(res.models?.any((final m) => m.model == defaultModel), isTrue); + expect( + res.models?.any((final m) => m.model!.startsWith(defaultModel)), + isTrue, + ); + }); + + test('Test list running models', () async { + await client.generateCompletion( + request: const GenerateCompletionRequest( + model: defaultModel, + prompt: 'You are a llama', + options: RequestOptions(numPredict: 1), + ), + ); + + final res = await client.listRunningModels(); + expect( + res.models?.any((final m) => m.model!.startsWith(defaultModel)), + isTrue, + ); }); test('Test show model info', () async { @@ -71,7 +90,17 @@ void main() { ); expect(res.license, isNotEmpty); expect(res.modelfile, isNotEmpty); + expect(res.parameters, isNotEmpty); expect(res.template, isNotEmpty); + expect(res.details?.format, isNotEmpty); + expect(res.details?.family, isNotEmpty); + expect(res.details?.families, isNotEmpty); + expect(res.details?.parameterSize, isNotEmpty); + expect(res.details?.quantizationLevel, isNotEmpty); + expect(res.modelInfo?.generalArchitecture, isNotEmpty); + expect(res.modelInfo?.generalFileType, greaterThan(0)); + expect(res.modelInfo?.generalParameterCount, greaterThan(0)); + expect(res.modelInfo?.generalQuantizationVersion, greaterThan(0)); }); test('Test copy model', () async { @@ -129,7 +158,7 @@ void main() { request: const PushModelRequest(model: 'mattw/pygmalion:latest'), 
); - expect(res.status, PushModelStatus.success); + expect(res.status, equals('success')); }); test('Test push model stream', skip: true, () async { @@ -138,25 +167,25 @@ void main() { ); int count = 0; - PushModelStatus? lastStatus; + String? lastStatus; await for (final res in stream) { lastStatus = res.status; count++; } expect(count, greaterThan(1)); - expect(lastStatus, equals(PushModelStatus.success)); + expect(lastStatus, equals('success')); }); test('Test check blob', skip: true, () async { await client.checkBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', ); }); test('Test create blob', skip: true, () async { await client.createBlob( - name: + digest: 'sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2', request: 'file contents', ); diff --git a/packages/ollama_dart/test/ollama_dart_version_test.dart b/packages/ollama_dart/test/ollama_dart_version_test.dart new file mode 100644 index 00000000..002f8167 --- /dev/null +++ b/packages/ollama_dart/test/ollama_dart_version_test.dart @@ -0,0 +1,24 @@ +import 'dart:io'; + +import 'package:ollama_dart/ollama_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('Ollama Version API tests', + skip: Platform.environment.containsKey('CI'), () { + late OllamaClient client; + + setUp(() async { + client = OllamaClient(); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test get version', () async { + final res = await client.getVersion(); + expect(res.version, isNotEmpty); + }); + }); +} diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md index 632fa141..aa3ac2cc 100644 --- a/packages/openai_dart/CHANGELOG.md +++ b/packages/openai_dart/CHANGELOG.md @@ -1,3 +1,44 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.4.2 + + - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e)) + - **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582)) + - **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). ([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935)) + +## 0.4.1 + + - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d)) + - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c)) + - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). 
([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c)) + - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7)) + +## 0.4.0 + + - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2)) + - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf)) + - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc)) + - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). ([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554)) + - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1)) + - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40)) + +## 0.3.3+1 + + - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5)) + +## 0.3.3 + + - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3)) + - **FIX**: Make vector store name optional in openai_dart ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff)) + - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678)) + +## 0.3.2+1 + + - **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). ([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8)) + - **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5)) + ## 0.3.2 - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). 
([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896)) diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index f020d128..68a26356 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,11 +16,11 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), etc. +- It can be used to consume OpenAI-compatible APIs like [GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. **Supported endpoints:** -- Chat (with tools and streaming support) +- Chat (with structured outputs, tools and streaming support) - Completions (legacy) - Embeddings - Fine-tuning @@ -28,7 +28,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Images - Models - Moderations -- Assistants v2 (with tools and streaming support) `beta` +- Assistants v2 (with structured outputs, tools and streaming support) `beta` * Threads * Messages * Runs @@ -97,14 +97,14 @@ final client = OpenAIClient( Given a list of messages comprising a conversation, the model will return a response. -Related guide: [Chat Completions](https://platform.openai.com/docs/guides/text-generation) +Related guide: [Chat Completions](https://platform.openai.com/docs/guides/chat-completions) **Create chat completion:** ```dart final res = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4'), + model: ChatCompletionModel.modelId('gpt-4o'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -121,28 +121,28 @@ print(res.choices.first.message.content); ``` `ChatCompletionModel` is a sealed class that offers two ways to specify the model: -- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4'` or your fine-tuned model ID). -- `ChatCompletionModel.model(ChatCompletionModels.gpt4)`: a value from `ChatCompletionModels` enum which lists all of the available models. +- `ChatCompletionModel.modelId('model-id')`: the model ID as string (e.g. `'gpt-4o'` or your fine-tuned model ID). +- `ChatCompletionModel.model(ChatCompletionModels.gpt4o)`: a value from `ChatCompletionModels` enum which lists all of the available models. `ChatCompletionMessage` is a sealed class that supports the following message types: - `ChatCompletionMessage.system()`: a system message. - `ChatCompletionMessage.user()`: a user message. - `ChatCompletionMessage.assistant()`: an assistant message. - `ChatCompletionMessage.tool()`: a tool message. -- `ChatCompletionMessage.function()`: a function message. +- `ChatCompletionMessage.function()`: a function message (deprecated in favor of tools). 
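As a quick illustrative sketch (not part of this diff), the message variants listed above can be combined in a single request, for example seeding a few-shot exchange with an assistant turn; the model id is only an example:

```dart
final res = await client.createChatCompletion(
  request: CreateChatCompletionRequest(
    model: ChatCompletionModel.modelId('gpt-4o-mini'), // example model id
    messages: [
      ChatCompletionMessage.system(
        content: 'You translate English to French.',
      ),
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string('Hello'),
      ),
      // Few-shot example answer provided as an assistant turn.
      ChatCompletionMessage.assistant(content: 'Bonjour'),
      ChatCompletionMessage.user(
        content: ChatCompletionUserMessageContent.string('Good morning'),
      ),
    ],
  ),
);
print(res.choices.first.message.content);
```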
`ChatCompletionMessage.user()` takes a `ChatCompletionUserMessageContent` object that supports the following content types: - `ChatCompletionUserMessageContent.string('content')`: string content. - `ChatCompletionUserMessageContent.parts([...])`: multi-modal content (check the 'Multi-modal prompt' section below). * `ChatCompletionMessageContentPart.text('content')`: text content. - * `ChatCompletionMessageContentPart.image(imageUrl: ...)`: image content. + * `ChatCompletionMessageContentPart.image(...)`: image content (URL or base64-encoded image). **Stream chat completion:** ```dart final stream = client.createChatCompletionStream( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-4-turbo'), + model: ChatCompletionModel.modelId('gpt-4o'), messages: [ ChatCompletionMessage.system( content: @@ -165,7 +165,9 @@ await for (final res in stream) { // 789 ``` -**Multi-modal prompt:** +**Multi-modal prompt:** ([docs](https://platform.openai.com/docs/guides/vision)) + +You can either provide the image URL: ```dart final res = await client.createChatCompletion( @@ -198,7 +200,76 @@ print(res.choices.first.message.content); // The fruit in the image is an apple. ``` -**JSON mode:** +Or provide the base64-encoded image: +```dart +//... +ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.parts( + [ + ChatCompletionMessageContentPart.text( + text: 'What fruit is this?', + ), + ChatCompletionMessageContentPart.image( + imageUrl: ChatCompletionMessageImageUrl( + url: '/9j/4AAQSkZJRgABAQAAAQABAAD/2wB...P3s/XHQ8cE/nmiupbL0+fz/r/MjnSbsr69/Rdu1j//2Q==', + detail: ChatCompletionMessageImageDetail.high, + ), + ), + ], + ), +), +//... +``` + +**Structured output: ([docs](https://platform.openai.com/docs/guides/structured-outputs))** + +Structured Outputs is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema. + +```dart +final res = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, + ), + messages: [ + ChatCompletionMessage.system( + content: 'You are a helpful assistant. That extracts names from text.', + ), + ChatCompletionMessage.user( + content: ChatCompletionUserMessageContent.string( + 'John, Mary, and Peter.', + ), + ), + ], + temperature: 0, + responseFormat: ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), + ), + ), +); +// {"names":["John","Mary","Peter"]} +``` + +**JSON mode:** ([docs](https://platform.openai.com/docs/guides/structured-outputs/json-mode)) + +> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended to use Structured Outputs if it is supported for your use case. ```dart final res = await client.createChatCompletion( @@ -227,7 +298,7 @@ final res = await client.createChatCompletion( // { "names": ["John", "Mary", "Peter"] } ``` -**Tools:** +**Tools:** ([docs](https://platform.openai.com/docs/guides/function-calling)) + +Tool calling allows you to connect models to external tools and systems.
```dart const function = FunctionObject( @@ -256,8 +329,8 @@ const tool = ChatCompletionTool( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -308,6 +381,8 @@ final answer = res2.choices.first.message.content; // The weather in Boston right now is sunny with a temperature of 22°C ``` +You can enable Structured Outputs for your tools by setting `strict: true` in your `FunctionObject` definition. Structured Outputs ensures that the arguments generated by the model for a tool call exactly match the JSON Schema you provided in the tool definition. + **Function calling:** (deprecated in favor of tools) ```dart @@ -333,7 +408,7 @@ const function = FunctionObject( final res1 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-3.5-turbo'), + model: ChatCompletionModel.modelId('gpt-4o-mini'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -355,7 +430,7 @@ final functionResult = getCurrentWeather(arguments['location'], arguments['unit' final res2 = await client.createChatCompletion( request: CreateChatCompletionRequest( - model: ChatCompletionModel.modelId('gpt-3.5-turbo'), + model: ChatCompletionModel.modelId('gpt-4o-mini'), messages: [ ChatCompletionMessage.system( content: 'You are a helpful assistant.', @@ -480,7 +555,7 @@ Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-t ```dart const request = CreateFineTuningJobRequest( - model: FineTuningModel.modelId('gpt-3.5-turbo'), + model: FineTuningModel.modelId('gpt-4o-mini'), trainingFile: 'file-abc123', validationFile: 'file-abc123', hyperparameters: FineTuningJobHyperparameters( @@ -768,7 +843,7 @@ final res = await client.createThreadMessage( ), MessageContent.imageUrl( imageUrl: MessageContentImageUrl( - url: 'https://example.com/image.jpg', + url: 'https://example.com/image.jpg', // or base64-encoded image ), ), ]), @@ -822,6 +897,41 @@ final res = await client.createThreadRun( ); ``` +You can also use Structured Outputs to ensure that the model-generated responses adhere to a specific JSON schema: + +```dart +final res = await client.createThreadRun( + threadId: threadId, + request: CreateRunRequest( + assistantId: assistantId, + instructions: 'You are a helpful assistant that extracts names from text.', + model: CreateRunRequestModel.modelId('gpt-4o'), + responseFormat: CreateRunRequestResponseFormat.responseFormat( + ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), + ) + ) + ), +); +``` + **Create run: (streaming)** ```dart @@ -1087,21 +1197,21 @@ final client = OpenAIClient( This client can be used to consume APIs that are compatible with the OpenAI API spec. 
-[TogetherAI](https://www.together.ai/): +[GitHub Models](https://github.com/marketplace/models): ```dart final client = OpenAIClient( - baseUrl: 'https://api.together.xyz/v1', - headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, + baseUrl: 'https://models.inference.ai.azure.com', + headers: { 'api-key': 'YOUR_GITHUB_TOKEN' }, ); ``` -[Anyscale](https://www.anyscale.com/): +[TogetherAI](https://www.together.ai/): ```dart final client = OpenAIClient( - baseUrl: 'https://api.endpoints.anyscale.com/v1', - headers: { 'api-key': 'YOUR_ANYSCALE_API_KEY' }, + baseUrl: 'https://api.together.xyz/v1', + headers: { 'api-key': 'YOUR_TOGETHER_AI_API_KEY' }, ); ``` diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart index 87830981..57003125 100644 --- a/packages/openai_dart/lib/openai_dart.dart +++ b/packages/openai_dart/lib/openai_dart.dart @@ -1,4 +1,4 @@ -/// Dart Client for the OpenAI API (completions, chat, embeddings, etc.). +/// Dart client for the OpenAI API. Supports chat (GPT-4o, etc.), completions, embeddings, images (DALL·E 3), assistants (threads, runs, vector stores, etc.), batch, fine-tuning, etc. library; export 'src/client.dart'; diff --git a/packages/openai_dart/lib/src/client.dart b/packages/openai_dart/lib/src/client.dart index 098a4cf2..b01a1594 100644 --- a/packages/openai_dart/lib/src/client.dart +++ b/packages/openai_dart/lib/src/client.dart @@ -18,6 +18,8 @@ class OpenAIClient extends g.OpenAIClient { /// - `apiKey`: your OpenAI API key. You can find your API key in the /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). /// - `organization`: your OpenAI organization ID (if applicable). + /// - `beta`: the content to use for the `OpenAI-Beta` header which can be + /// used to enable beta features. /// /// Advance configuration options: /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can @@ -32,6 +34,7 @@ class OpenAIClient extends g.OpenAIClient { OpenAIClient({ final String? apiKey, final String? organization, + final String? beta = 'assistants=v2', final String? baseUrl, final Map? headers, final Map? queryParams, @@ -41,7 +44,7 @@ class OpenAIClient extends g.OpenAIClient { baseUrl: baseUrl, headers: { if (organization != null) 'OpenAI-Organization': organization, - 'OpenAI-Beta': 'assistants=v2', + if (beta != null) 'OpenAI-Beta': beta, ...?headers, }, queryParams: queryParams ?? const {}, diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index aca8f85f..b58d7e15 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -58,7 +58,7 @@ class OpenAIClientException implements Exception { // CLASS: OpenAIClient // ========================================== -/// Client for OpenAI API (v.2.0.0) +/// Client for OpenAI API (v.2.3.0) /// /// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. class OpenAIClient { @@ -1175,11 +1175,14 @@ class OpenAIClient { /// /// `threadId`: The ID of the thread to run. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
+ /// /// `request`: Request object for the Create run endpoint. /// /// `POST` `https://api.openai.com/v1/threads/{thread_id}/runs` Future createThreadRun({ required String threadId, + String? include, required CreateRunRequest request, }) async { final r = await makeRequest( @@ -1190,6 +1193,9 @@ class OpenAIClient { requestType: 'application/json', responseType: 'application/json', body: request, + queryParams: { + if (include != null) 'include': include, + }, ); return RunObject.fromJson(_jsonDecode(r)); } @@ -1324,6 +1330,8 @@ class OpenAIClient { /// /// `before`: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps` Future listThreadRunSteps({ required String threadId, @@ -1332,6 +1340,7 @@ class OpenAIClient { String order = 'desc', String? after, String? before, + String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1345,6 +1354,7 @@ class OpenAIClient { 'order': order, if (after != null) 'after': after, if (before != null) 'before': before, + if (include != null) 'include': include, }, ); return ListRunStepsResponse.fromJson(_jsonDecode(r)); @@ -1362,11 +1372,14 @@ class OpenAIClient { /// /// `stepId`: The ID of the run step to retrieve. /// + /// `include`: A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + /// /// `GET` `https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/steps/{step_id}` Future getThreadRunStep({ required String threadId, required String runId, required String stepId, + String? include, }) async { final r = await makeRequest( baseUrl: 'https://api.openai.com/v1', @@ -1375,6 +1388,9 @@ class OpenAIClient { isMultipart: false, requestType: '', responseType: 'application/json', + queryParams: { + if (include != null) 'include': include, + }, ); return RunStepObject.fromJson(_jsonDecode(r)); } @@ -1846,7 +1862,7 @@ class OpenAIClient { // METHOD: cancelBatch // ------------------------------------------ - /// Cancels an in-progress batch. + /// Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. /// /// `batchId`: The ID of the batch to cancel. 
/// diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart index 59bac618..4c7ba8df 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_object.dart @@ -36,29 +36,47 @@ class AssistantObject with _$AssistantObject { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. required String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. required List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? responseFormat, @@ -160,8 +178,6 @@ enum AssistantObjectObject { /// `auto` is the default value enum AssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -170,25 +186,37 @@ enum AssistantResponseFormatMode { // CLASS: AssistantObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class AssistantObjectResponseFormat with _$AssistantObjectResponseFormat { const AssistantObjectResponseFormat._(); /// `auto` is the default value - const factory AssistantObjectResponseFormat.enumeration( + const factory AssistantObjectResponseFormat.mode( AssistantResponseFormatMode value, ) = AssistantObjectResponseFormatEnumeration; /// No Description - const factory AssistantObjectResponseFormat.assistantsResponseFormat( - AssistantsResponseFormat value, - ) = AssistantObjectResponseFormatAssistantsResponseFormat; + const factory AssistantObjectResponseFormat.responseFormat( + ResponseFormat value, + ) = AssistantObjectResponseFormatResponseFormat; /// Object construction from a JSON representation factory AssistantObjectResponseFormat.fromJson(Map json) => @@ -215,8 +243,8 @@ class _AssistantObjectResponseFormatConverter } if (data is Map) { try { - return AssistantObjectResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return AssistantObjectResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -230,8 +258,7 @@ class _AssistantObjectResponseFormatConverter return switch (data) { AssistantObjectResponseFormatEnumeration(value: final v) => _$AssistantResponseFormatModeEnumMap[v]!, - AssistantObjectResponseFormatAssistantsResponseFormat(value: final v) => - v.toJson(), + AssistantObjectResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart index 348155db..0686da7b 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_stream_event.dart @@ -61,7 +61,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. const factory AssistantStreamEvent.runStepStreamEvent({ /// The type of the event. required EventType event, @@ -74,7 +74,7 @@ sealed class AssistantStreamEvent with _$AssistantStreamEvent { // UNION: RunStepStreamDeltaEvent // ------------------------------------------ - /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + /// Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. const factory AssistantStreamEvent.runStepStreamDeltaEvent({ /// The type of the event. 
required EventType event, diff --git a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart index 6e45f715..30a5cacc 100644 --- a/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart +++ b/packages/openai_dart/lib/src/generated/schema/assistant_tools.dart @@ -30,7 +30,11 @@ sealed class AssistantTools with _$AssistantTools { /// FileSearch tool const factory AssistantTools.fileSearch({ /// The type of tool being defined: `file_search` - @Default('file_search') String type, + required String type, + + /// Overrides for the file search tool. + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch, }) = AssistantToolsFileSearch; // ------------------------------------------ @@ -63,3 +67,67 @@ enum AssistantToolsEnumType { @JsonValue('function') function, } + +// ========================================== +// CLASS: AssistantToolsFileSearchFileSearch +// ========================================== + +/// Overrides for the file search tool. +@freezed +class AssistantToolsFileSearchFileSearch + with _$AssistantToolsFileSearchFileSearch { + const AssistantToolsFileSearchFileSearch._(); + + /// Factory constructor for AssistantToolsFileSearchFileSearch + const factory AssistantToolsFileSearchFileSearch({ + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) int? maxNumResults, + + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions, + }) = _AssistantToolsFileSearchFileSearch; + + /// Object construction from a JSON representation + factory AssistantToolsFileSearchFileSearch.fromJson( + Map json) => + _$AssistantToolsFileSearchFileSearchFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'max_num_results', + 'ranking_options' + ]; + + /// Validation constants + static const maxNumResultsMinValue = 1; + static const maxNumResultsMaxValue = 50; + + /// Perform validations on the schema property values + String? validateSchema() { + if (maxNumResults != null && maxNumResults! < maxNumResultsMinValue) { + return "The value of 'maxNumResults' cannot be < $maxNumResultsMinValue"; + } + if (maxNumResults != null && maxNumResults! 
> maxNumResultsMaxValue) { + return "The value of 'maxNumResults' cannot be > $maxNumResultsMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'max_num_results': maxNumResults, + 'ranking_options': rankingOptions, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart b/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart deleted file mode 100644 index bc5f9c8b..00000000 --- a/packages/openai_dart/lib/src/generated/schema/assistants_response_format.dart +++ /dev/null @@ -1,53 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: AssistantsResponseFormat -// ========================================== - -/// An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. -@freezed -class AssistantsResponseFormat with _$AssistantsResponseFormat { - const AssistantsResponseFormat._(); - - /// Factory constructor for AssistantsResponseFormat - const factory AssistantsResponseFormat({ - /// Must be one of `text` or `json_object`. - @Default(AssistantsResponseFormatType.text) - AssistantsResponseFormatType type, - }) = _AssistantsResponseFormat; - - /// Object construction from a JSON representation - factory AssistantsResponseFormat.fromJson(Map json) => - _$AssistantsResponseFormatFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - }; - } -} - -// ========================================== -// ENUM: AssistantsResponseFormatType -// ========================================== - -/// Must be one of `text` or `json_object`. -enum AssistantsResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, -} diff --git a/packages/openai_dart/lib/src/generated/schema/batch.dart b/packages/openai_dart/lib/src/generated/schema/batch.dart index 94cc6080..471ac112 100644 --- a/packages/openai_dart/lib/src/generated/schema/batch.dart +++ b/packages/openai_dart/lib/src/generated/schema/batch.dart @@ -74,7 +74,9 @@ class Batch with _$Batch { @JsonKey(name: 'request_counts', includeIfNull: false) BatchRequestCounts? requestCounts, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) dynamic metadata, }) = _Batch; diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart index 8678903a..4b4adc2c 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_logprobs.dart @@ -16,7 +16,10 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { /// Factory constructor for ChatCompletionLogprobs const factory ChatCompletionLogprobs({ /// A list of message content tokens with log probability information. - required List? content, + @JsonKey(includeIfNull: false) List? content, + + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) List? refusal, }) = _ChatCompletionLogprobs; /// Object construction from a JSON representation @@ -24,7 +27,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { _$ChatCompletionLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content']; + static const List propertyNames = ['content', 'refusal']; /// Perform validations on the schema property values String? validateSchema() { @@ -35,6 +38,7 @@ class ChatCompletionLogprobs with _$ChatCompletionLogprobs { Map toMap() { return { 'content': content, + 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart index ae4d6e9c..93afcd9b 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart @@ -59,6 +59,9 @@ sealed class ChatCompletionMessage with _$ChatCompletionMessage { /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. @JsonKey(includeIfNull: false) String? content, + /// The refusal message by the assistant. + @JsonKey(includeIfNull: false) String? refusal, + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. @JsonKey(includeIfNull: false) String? name, @@ -135,12 +138,12 @@ sealed class ChatCompletionUserMessageContent with _$ChatCompletionUserMessageContent { const ChatCompletionUserMessageContent._(); - /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. + /// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. const factory ChatCompletionUserMessageContent.parts( List value, ) = ChatCompletionMessageContentParts; - /// The text contents of the message. + /// The text contents of the user message. const factory ChatCompletionUserMessageContent.string( String value, ) = ChatCompletionUserMessageContentString; @@ -158,9 +161,11 @@ class _ChatCompletionUserMessageContentConverter @override ChatCompletionUserMessageContent fromJson(Object? 
data) { - if (data is List && - data.every((item) => item is ChatCompletionMessageContentPart)) { - return ChatCompletionMessageContentParts(data.cast()); + if (data is List && data.every((item) => item is Map)) { + return ChatCompletionMessageContentParts(data + .map((i) => ChatCompletionMessageContentPart.fromJson( + i as Map)) + .toList(growable: false)); } if (data is String) { return ChatCompletionUserMessageContentString(data); diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart index e96bf346..6e38e239 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part.dart @@ -18,7 +18,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartText // ------------------------------------------ - /// A text content part of a user message. + /// A text content part of a message. const factory ChatCompletionMessageContentPart.text({ /// The type of the content part, in this case `text`. @Default(ChatCompletionMessageContentPartType.text) @@ -32,8 +32,7 @@ sealed class ChatCompletionMessageContentPart // UNION: ChatCompletionMessageContentPartImage // ------------------------------------------ - /// Union constructor for [ChatCompletionMessageContentPartImage] - @FreezedUnionValue('image_url') + /// An image content part of a user message. const factory ChatCompletionMessageContentPart.image({ /// The type of the content part, in this case `image_url`. @Default(ChatCompletionMessageContentPartType.imageUrl) @@ -43,6 +42,20 @@ sealed class ChatCompletionMessageContentPart @JsonKey(name: 'image_url') required ChatCompletionMessageImageUrl imageUrl, }) = ChatCompletionMessageContentPartImage; + // ------------------------------------------ + // UNION: ChatCompletionMessageContentPartRefusal + // ------------------------------------------ + + /// A refusal content part of a message. + const factory ChatCompletionMessageContentPart.refusal({ + /// The type of the content part, in this case `refusal`. + @Default(ChatCompletionMessageContentPartType.refusal) + ChatCompletionMessageContentPartType type, + + /// The refusal message generated by the model. 
+ required String refusal, + }) = ChatCompletionMessageContentPartRefusal; + /// Object construction from a JSON representation factory ChatCompletionMessageContentPart.fromJson( Map json) => @@ -58,6 +71,8 @@ enum ChatCompletionMessageContentPartEnumType { text, @JsonValue('image_url') imageUrl, + @JsonValue('refusal') + refusal, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart index 0b4409fb..1aeebe14 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_content_part_type.dart @@ -14,4 +14,6 @@ enum ChatCompletionMessageContentPartType { text, @JsonValue('image_url') imageUrl, + @JsonValue('refusal') + refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart index 1b4f0705..8d81379d 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart @@ -79,7 +79,10 @@ class ChatCompletionStreamResponseChoiceLogprobs /// Factory constructor for ChatCompletionStreamResponseChoiceLogprobs const factory ChatCompletionStreamResponseChoiceLogprobs({ /// A list of message content tokens with log probability information. - required List? content, + @JsonKey(includeIfNull: false) List? content, + + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) List? refusal, }) = _ChatCompletionStreamResponseChoiceLogprobs; /// Object construction from a JSON representation @@ -88,7 +91,7 @@ class ChatCompletionStreamResponseChoiceLogprobs _$ChatCompletionStreamResponseChoiceLogprobsFromJson(json); /// List of all property names of schema - static const List propertyNames = ['content']; + static const List propertyNames = ['content', 'refusal']; /// Perform validations on the schema property values String? validateSchema() { @@ -99,6 +102,7 @@ class ChatCompletionStreamResponseChoiceLogprobs Map toMap() { return { 'content': content, + 'refusal': refusal, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart index e676c18c..5cc5fa0d 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart @@ -19,6 +19,9 @@ class ChatCompletionStreamResponseDelta /// The contents of the chunk message. @JsonKey(includeIfNull: false) String? content, + /// The refusal message generated by the model. + @JsonKey(includeIfNull: false) String? refusal, + /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? 
functionCall, @@ -43,6 +46,7 @@ class ChatCompletionStreamResponseDelta /// List of all property names of schema static const List propertyNames = [ 'content', + 'refusal', 'function_call', 'tool_calls', 'role' @@ -57,6 +61,7 @@ class ChatCompletionStreamResponseDelta Map toMap() { return { 'content': content, + 'refusal': refusal, 'function_call': functionCall, 'tool_calls': toolCalls, 'role': role, diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart new file mode 100644 index 00000000..a8f0c03d --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_request_param.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ChunkingStrategyRequestParam +// ========================================== + +/// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ChunkingStrategyRequestParam with _$ChunkingStrategyRequestParam { + const ChunkingStrategyRequestParam._(); + + // ------------------------------------------ + // UNION: AutoChunkingStrategyRequestParam + // ------------------------------------------ + + /// Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` + /// and `chunk_overlap_tokens` of `400`. + const factory ChunkingStrategyRequestParam.auto({ + /// Always `auto`. + required String type, + }) = AutoChunkingStrategyRequestParam; + + // ------------------------------------------ + // UNION: StaticChunkingStrategyRequestParam + // ------------------------------------------ + + /// Static chunking strategy + const factory ChunkingStrategyRequestParam.static({ + /// Always `static`. + required String type, + + /// Static chunking strategy + required StaticChunkingStrategy static, + }) = StaticChunkingStrategyRequestParam; + + /// Object construction from a JSON representation + factory ChunkingStrategyRequestParam.fromJson(Map json) => + _$ChunkingStrategyRequestParamFromJson(json); +} + +// ========================================== +// ENUM: ChunkingStrategyRequestParamEnumType +// ========================================== + +enum ChunkingStrategyRequestParamEnumType { + @JsonValue('auto') + auto, + @JsonValue('static') + static, +} diff --git a/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart new file mode 100644 index 00000000..c706df60 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chunking_strategy_response_param.dart @@ -0,0 +1,55 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ChunkingStrategyResponseParam +// ========================================== + +/// The chunking strategy used to chunk the file(s). 
+@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ChunkingStrategyResponseParam + with _$ChunkingStrategyResponseParam { + const ChunkingStrategyResponseParam._(); + + // ------------------------------------------ + // UNION: StaticChunkingStrategyResponseParam + // ------------------------------------------ + + /// Static Chunking Strategy. + const factory ChunkingStrategyResponseParam.static({ + /// Always `static`. + required String type, + + /// Static chunking strategy + required StaticChunkingStrategy static, + }) = StaticChunkingStrategyResponseParam; + + // ------------------------------------------ + // UNION: OtherChunkingStrategyResponseParam + // ------------------------------------------ + + /// Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because + /// the file was indexed before the `chunking_strategy` concept was introduced in the API. + const factory ChunkingStrategyResponseParam.other({ + /// Always `other`. + required String type, + }) = OtherChunkingStrategyResponseParam; + + /// Object construction from a JSON representation + factory ChunkingStrategyResponseParam.fromJson(Map json) => + _$ChunkingStrategyResponseParamFromJson(json); +} + +// ========================================== +// ENUM: ChunkingStrategyResponseParamEnumType +// ========================================== + +enum ChunkingStrategyResponseParamEnumType { + @JsonValue('static') + static, + @JsonValue('other') + other, +} diff --git a/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart new file mode 100644 index 00000000..14fe08a8 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/completion_tokens_details.dart @@ -0,0 +1,41 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: CompletionTokensDetails +// ========================================== + +/// Breakdown of tokens used in a completion. +@freezed +class CompletionTokensDetails with _$CompletionTokensDetails { + const CompletionTokensDetails._(); + + /// Factory constructor for CompletionTokensDetails + const factory CompletionTokensDetails({ + /// Tokens generated by the model for reasoning. + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens, + }) = _CompletionTokensDetails; + + /// Object construction from a JSON representation + factory CompletionTokensDetails.fromJson(Map json) => + _$CompletionTokensDetailsFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['reasoning_tokens']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'reasoning_tokens': reasoningTokens, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart index 17826175..86877b8e 100644 --- a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart +++ b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart @@ -23,6 +23,10 @@ class CompletionUsage with _$CompletionUsage { /// Total number of tokens used in the request (prompt + completion). 
@JsonKey(name: 'total_tokens') required int totalTokens, + + /// Breakdown of tokens used in a completion. + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails, }) = _CompletionUsage; /// Object construction from a JSON representation @@ -33,7 +37,8 @@ class CompletionUsage with _$CompletionUsage { static const List propertyNames = [ 'completion_tokens', 'prompt_tokens', - 'total_tokens' + 'total_tokens', + 'completion_tokens_details' ]; /// Perform validations on the schema property values @@ -47,6 +52,7 @@ class CompletionUsage with _$CompletionUsage { 'completion_tokens': completionTokens, 'prompt_tokens': promptTokens, 'total_tokens': totalTokens, + 'completion_tokens_details': completionTokensDetails, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 16db2e01..312d8f5c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -27,29 +27,47 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? responseFormat, @@ -135,6 +153,8 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// Available assistant models. Mind that the list may not be exhaustive nor up-to-date. 
enum AssistantModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -163,6 +183,12 @@ enum AssistantModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -177,6 +203,14 @@ enum AssistantModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== @@ -240,8 +274,6 @@ class _AssistantModelConverter /// `auto` is the default value enum CreateAssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -250,11 +282,23 @@ enum CreateAssistantResponseFormatMode { // CLASS: CreateAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@freezed sealed class CreateAssistantRequestResponseFormat with _$CreateAssistantRequestResponseFormat { @@ -266,9 +310,9 @@ sealed class CreateAssistantRequestResponseFormat ) = CreateAssistantRequestResponseFormatEnumeration; /// No Description - const factory CreateAssistantRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateAssistantRequestResponseFormatAssistantsResponseFormat; + const factory CreateAssistantRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateAssistantRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateAssistantRequestResponseFormat.fromJson( @@ -298,8 +342,8 @@ class _CreateAssistantRequestResponseFormatConverter } if (data is Map) { try { - return CreateAssistantRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateAssistantRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -313,9 +357,7 @@ class _CreateAssistantRequestResponseFormatConverter return switch (data) { CreateAssistantRequestResponseFormatEnumeration(value: final v) => _$CreateAssistantResponseFormatModeEnumMap[v]!, - CreateAssistantRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + CreateAssistantRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart index 5014b4f1..b7a86f72 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart @@ -19,7 +19,7 @@ class CreateBatchRequest with _$CreateBatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. @JsonKey(name: 'input_file_id') required String inputFileId, /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index 997af317..3d59ae2b 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -15,10 +15,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Factory constructor for CreateChatCompletionRequest const factory CreateChatCompletionRequest({ - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. 
See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @_ChatCompletionModelConverter() required ChatCompletionModel model, - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). required List messages, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. @@ -30,22 +32,37 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? logitBias, - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? logprobs, - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. 
+ /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) @Default(1) int? n, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. @@ -55,25 +72,59 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? presencePenalty, - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. 
+ /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @JsonKey(includeIfNull: false) int? seed, + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + CreateChatCompletionRequestServiceTier? serviceTier, + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) @Default(false) bool? stream, /// Options for streaming response. Only set this when you set `stream: true`. @@ -90,20 +141,28 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// We generally recommend altering this or `temperature` but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. 
Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @JsonKey(includeIfNull: false) List? tools, /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? user, @@ -112,7 +171,8 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @_ChatCompletionFunctionCallConverter() @@ -138,10 +198,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs', 'top_logprobs', 'max_tokens', + 'max_completion_tokens', 'n', 'presence_penalty', 'response_format', 'seed', + 'service_tier', 'stop', 'stream', 'stream_options', @@ -149,6 +211,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p', 'tools', 'tool_choice', + 'parallel_tool_calls', 'user', 'function_call', 'functions' @@ -226,10 +289,12 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'logprobs': logprobs, 'top_logprobs': topLogprobs, 'max_tokens': maxTokens, + 'max_completion_tokens': maxCompletionTokens, 'n': n, 'presence_penalty': presencePenalty, 'response_format': responseFormat, 'seed': seed, + 'service_tier': serviceTier, 'stop': stop, 'stream': stream, 'stream_options': streamOptions, @@ -237,6 +302,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'top_p': topP, 'tools': tools, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'user': user, 'function_call': functionCall, 'functions': functions, @@ -250,6 +316,8 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Available completion models. Mind that the list may not be exhaustive nor up-to-date. 
enum ChatCompletionModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -278,6 +346,12 @@ enum ChatCompletionModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -292,13 +366,22 @@ enum ChatCompletionModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== // CLASS: ChatCompletionModel // ========================================== -/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. +/// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) +/// table for details on which models work with the Chat API. @freezed sealed class ChatCompletionModel with _$ChatCompletionModel { const ChatCompletionModel._(); @@ -351,43 +434,25 @@ class _ChatCompletionModelConverter } // ========================================== -// CLASS: ChatCompletionResponseFormat +// ENUM: CreateChatCompletionRequestServiceTier // ========================================== -/// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. -/// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Specifies the latency tier to use for processing the request. This parameter is relevant for customers +/// subscribed to the scale tier service: +/// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits +/// until they are exhausted. +/// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the +/// default service tier with a lower uptime SLA and no latency guarantee. +/// - If set to 'default', the request will be processed using the default service tier with a lower uptime +/// SLA and no latency guarantee. +/// - When not set, the default behavior is 'auto'. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. -@freezed -class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat { - const ChatCompletionResponseFormat._(); - - /// Factory constructor for ChatCompletionResponseFormat - const factory ChatCompletionResponseFormat({ - /// Must be one of `text` or `json_object`. 
- @Default(ChatCompletionResponseFormatType.text) - ChatCompletionResponseFormatType type, - }) = _ChatCompletionResponseFormat; - - /// Object construction from a JSON representation - factory ChatCompletionResponseFormat.fromJson(Map json) => - _$ChatCompletionResponseFormatFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['type']; - - /// Perform validations on the schema property values - String? validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'type': type, - }; - } +/// When this parameter is set, the response body will include the `service_tier` utilized. +enum CreateChatCompletionRequestServiceTier { + @JsonValue('auto') + auto, + @JsonValue('default') + vDefault, } // ========================================== @@ -467,7 +532,8 @@ enum ChatCompletionToolChoiceMode { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. -/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. +/// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the +/// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @freezed @@ -555,7 +621,8 @@ enum ChatCompletionFunctionCallMode { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. -/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. +/// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that +/// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @freezed @@ -620,15 +687,3 @@ class _ChatCompletionFunctionCallConverter }; } } - -// ========================================== -// ENUM: ChatCompletionResponseFormatType -// ========================================== - -/// Must be one of `text` or `json_object`. -enum ChatCompletionResponseFormatType { - @JsonValue('text') - text, - @JsonValue('json_object') - jsonObject, -} diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart index 95771ce0..9a9687d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart @@ -27,6 +27,15 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { /// The model used for the chat completion. required String model, + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + ServiceTier? serviceTier, + /// This fingerprint represents the backend configuration that the model runs with. 
/// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -50,6 +59,7 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices', 'created', 'model', + 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -67,6 +77,7 @@ class CreateChatCompletionResponse with _$CreateChatCompletionResponse { 'choices': choices, 'created': created, 'model': model, + 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index 18cab5fa..cc0341fc 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -24,11 +24,20 @@ class CreateChatCompletionStreamResponse required List choices, /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - required int created, + @JsonKey(includeIfNull: false) int? created, /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + ServiceTier? serviceTier, + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -36,7 +45,7 @@ class CreateChatCompletionStreamResponse String? systemFingerprint, /// The object type, which is always `chat.completion.chunk`. - required String object, + @JsonKey(includeIfNull: false) String? object, /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? usage, @@ -53,6 +62,7 @@ class CreateChatCompletionStreamResponse 'choices', 'created', 'model', + 'service_tier', 'system_fingerprint', 'object', 'usage' @@ -70,6 +80,7 @@ class CreateChatCompletionStreamResponse 'choices': choices, 'created': created, 'model': model, + 'service_tier': serviceTier, 'system_fingerprint': systemFingerprint, 'object': object, 'usage': usage, diff --git a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart index 31bb714a..ff66b86c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart @@ -347,7 +347,7 @@ class _CompletionPromptConverter @override CompletionPrompt fromJson(Object? 
data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return CompletionPromptListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart index fec9f621..10c24925 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart @@ -179,7 +179,7 @@ class _EmbeddingInputConverter @override EmbeddingInput fromJson(Object? data) { - if (data is List && data.every((item) => item is List)) { + if (data is List && data.every((item) => item is List)) { return EmbeddingInputListListInt(data.cast()); } if (data is List && data.every((item) => item is int)) { diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index 14929898..863ffb57 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -23,7 +23,12 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') required String trainingFile, @@ -32,9 +37,9 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? hyperparameters, - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? suffix, /// The ID of an uploaded file that contains validation data. 
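// A minimal sketch of a fine-tuning job using the newly listed `gpt-4o-mini`
// base model and the longer (up to 64 characters) `suffix`. The
// `FineTuningModel.model` constructor and the `client.createFineTuningJob`
// method are assumed from the wider openai_dart API; only
// `FineTuningModels.gpt4oMini`, `trainingFile` and `suffix` are confirmed by
// this changeset.
import 'package:openai_dart/openai_dart.dart';

Future<void> fineTuningSketch(OpenAIClient client) async {
  final job = await client.createFineTuningJob(
    request: CreateFineTuningJobRequest(
      model: FineTuningModel.model(FineTuningModels.gpt4oMini),
      // A JSONL file previously uploaded with purpose `fine-tune`.
      trainingFile: 'file-abc123',
      // Up to 64 characters are now accepted here.
      suffix: 'customer-support-assistant',
    ),
  );
  print(job.id);
}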
@@ -75,7 +80,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// Validation constants static const suffixMinLengthValue = 1; - static const suffixMaxLengthValue = 40; + static const suffixMaxLengthValue = 64; static const seedMinValue = 0; static const seedMaxValue = 2147483647; @@ -122,6 +127,8 @@ enum FineTuningModels { davinci002, @JsonValue('gpt-3.5-turbo') gpt35Turbo, + @JsonValue('gpt-4o-mini') + gpt4oMini, } // ========================================== diff --git a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart index bad29bc1..fc42a4d2 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_message_request.dart @@ -25,7 +25,9 @@ class CreateMessageRequest with _$CreateMessageRequest { /// A list of files attached to the message, and the tools they were added to. @JsonKey(includeIfNull: false) List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _CreateMessageRequest; @@ -88,8 +90,10 @@ class _CreateMessageRequestContentConverter @override CreateMessageRequestContent fromJson(Object? data) { - if (data is List && data.every((item) => item is MessageContent)) { - return CreateMessageRequestContentListMessageContent(data.cast()); + if (data is List && data.every((item) => item is Map)) { + return CreateMessageRequestContentListMessageContent(data + .map((i) => MessageContent.fromJson(i as Map)) + .toList(growable: false)); } if (data is String) { return CreateMessageRequestContentString(data); diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index edd89f09..3698ed7c 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -37,13 +37,18 @@ class CreateRunRequest with _$CreateRunRequest { /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. @JsonKey(includeIfNull: false) List? tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -69,11 +74,28 @@ class CreateRunRequest with _$CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
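// A minimal sketch of the new run options. The `assistantId` field, the
// `client.createThreadRun` method and the `ResponseFormat.jsonObject`
// constructor are assumed from the wider openai_dart API; `parallelToolCalls`
// and the renamed `CreateRunRequestResponseFormat.responseFormat` and
// `CreateRunRequestModel.model` constructors are the parts confirmed by this
// changeset.
import 'package:openai_dart/openai_dart.dart';

Future<void> runRequestSketch(OpenAIClient client, String threadId) async {
  final run = await client.createThreadRun(
    threadId: threadId,
    request: CreateRunRequest(
      assistantId: 'asst_abc123',
      model: CreateRunRequestModel.model(RunModels.gpt4oMini),
      // New field: ask the model to call at most one tool at a time.
      parallelToolCalls: false,
      // JSON mode; the instructions must also ask the model for JSON output.
      responseFormat: CreateRunRequestResponseFormat.responseFormat(
        ResponseFormat.jsonObject(),
      ),
    ),
  );
  print(run.status);
}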
@_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -101,6 +123,7 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -155,6 +178,7 @@ class CreateRunRequest with _$CreateRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; @@ -167,6 +191,8 @@ class CreateRunRequest with _$CreateRunRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum RunModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -195,6 +221,12 @@ enum RunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -209,6 +241,14 @@ enum RunModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== @@ -221,12 +261,12 @@ sealed class CreateRunRequestModel with _$CreateRunRequestModel { const CreateRunRequestModel._(); /// Available models. Mind that the list may not be exhaustive nor up-to-date. - const factory CreateRunRequestModel.enumeration( + const factory CreateRunRequestModel.model( RunModels value, ) = CreateRunRequestModelEnumeration; /// The ID of the model to use for this request. - const factory CreateRunRequestModel.string( + const factory CreateRunRequestModel.modelId( String value, ) = CreateRunRequestModelString; @@ -361,8 +401,6 @@ class _CreateRunRequestToolChoiceConverter /// `auto` is the default value enum CreateRunRequestResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -371,11 +409,23 @@ enum CreateRunRequestResponseFormatMode { // CLASS: CreateRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates +/// is valid JSON. 
/// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateRunRequestResponseFormat with _$CreateRunRequestResponseFormat { @@ -387,9 +437,9 @@ sealed class CreateRunRequestResponseFormat ) = CreateRunRequestResponseFormatEnumeration; /// No Description - const factory CreateRunRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateRunRequestResponseFormatAssistantsResponseFormat; + const factory CreateRunRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateRunRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateRunRequestResponseFormat.fromJson(Map json) => @@ -418,8 +468,8 @@ class _CreateRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateRunRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateRunRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -433,7 +483,7 @@ class _CreateRunRequestResponseFormatConverter return switch (data) { CreateRunRequestResponseFormatEnumeration(value: final v) => _$CreateRunRequestResponseFormatModeEnumMap[v]!, - CreateRunRequestResponseFormatAssistantsResponseFormat(value: final v) => + CreateRunRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 5f7692df..d58474f8 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -36,13 +36,18 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, @@ -68,11 +73,28 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -100,6 +122,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format', 'stream' ]; @@ -154,6 +177,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, 'stream': stream, }; @@ -166,6 +190,8 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// Available models. Mind that the list may not be exhaustive nor up-to-date. enum ThreadAndRunModels { + @JsonValue('chatgpt-4o-latest') + chatgpt4oLatest, @JsonValue('gpt-4') gpt4, @JsonValue('gpt-4-32k') @@ -194,6 +220,12 @@ enum ThreadAndRunModels { gpt4o, @JsonValue('gpt-4o-2024-05-13') gpt4o20240513, + @JsonValue('gpt-4o-2024-08-06') + gpt4o20240806, + @JsonValue('gpt-4o-mini') + gpt4oMini, + @JsonValue('gpt-4o-mini-2024-07-18') + gpt4oMini20240718, @JsonValue('gpt-3.5-turbo') gpt35Turbo, @JsonValue('gpt-3.5-turbo-16k') @@ -208,6 +240,14 @@ enum ThreadAndRunModels { gpt35Turbo0613, @JsonValue('gpt-3.5-turbo-1106') gpt35Turbo1106, + @JsonValue('o1-mini') + o1Mini, + @JsonValue('o1-mini-2024-09-12') + o1Mini20240912, + @JsonValue('o1-preview') + o1Preview, + @JsonValue('o1-preview-2024-09-12') + o1Preview20240912, } // ========================================== @@ -367,8 +407,6 @@ class _CreateThreadAndRunRequestToolChoiceConverter /// `auto` is the default value enum CreateThreadAndRunRequestResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -377,11 +415,23 @@ enum CreateThreadAndRunRequestResponseFormatMode { // CLASS: CreateThreadAndRunRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-3.5-turbo-1106`. /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates +/// is valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class CreateThreadAndRunRequestResponseFormat with _$CreateThreadAndRunRequestResponseFormat { @@ -393,9 +443,9 @@ sealed class CreateThreadAndRunRequestResponseFormat ) = CreateThreadAndRunRequestResponseFormatEnumeration; /// No Description - const factory CreateThreadAndRunRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat; + const factory CreateThreadAndRunRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = CreateThreadAndRunRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory CreateThreadAndRunRequestResponseFormat.fromJson( @@ -427,8 +477,8 @@ class _CreateThreadAndRunRequestResponseFormatConverter } if (data is Map) { try { - return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return CreateThreadAndRunRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -442,9 +492,7 @@ class _CreateThreadAndRunRequestResponseFormatConverter return switch (data) { CreateThreadAndRunRequestResponseFormatEnumeration(value: final v) => _$CreateThreadAndRunRequestResponseFormatModeEnumMap[v]!, - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + CreateThreadAndRunRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart index 22823647..2cfb4b35 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_request.dart @@ -22,7 +22,9 @@ class CreateThreadRequest with _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? 
metadata, }) = _CreateThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart index 6a607eae..3111c855 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_batch_request.dart @@ -18,6 +18,11 @@ class CreateVectorStoreFileBatchRequest const factory CreateVectorStoreFileBatchRequest({ /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids') required List fileIds, + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileBatchRequest; /// Object construction from a JSON representation @@ -26,7 +31,7 @@ class CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids']; + static const List propertyNames = ['file_ids', 'chunking_strategy']; /// Perform validations on the schema property values String? validateSchema() { @@ -37,6 +42,7 @@ class CreateVectorStoreFileBatchRequest Map toMap() { return { 'file_ids': fileIds, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart index 742fae3b..c18eadee 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_file_request.dart @@ -17,6 +17,11 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { const factory CreateVectorStoreFileRequest({ /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_id') required String fileId, + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, }) = _CreateVectorStoreFileRequest; /// Object construction from a JSON representation @@ -24,7 +29,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { _$CreateVectorStoreFileRequestFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id']; + static const List propertyNames = ['file_id', 'chunking_strategy']; /// Perform validations on the schema property values String? 
validateSchema() { @@ -35,6 +40,7 @@ class CreateVectorStoreFileRequest with _$CreateVectorStoreFileRequest { Map toMap() { return { 'file_id': fileId, + 'chunking_strategy': chunkingStrategy, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart index cce0ccd3..b26b786e 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_vector_store_request.dart @@ -15,17 +15,24 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Factory constructor for CreateVectorStoreRequest const factory CreateVectorStoreRequest({ + /// The name of the vector store. + @JsonKey(includeIfNull: false) String? name, + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - /// The name of the vector store. - required String name, - /// The expiration policy for a vector store. @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _CreateVectorStoreRequest; @@ -35,9 +42,10 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// List of all property names of schema static const List propertyNames = [ - 'file_ids', 'name', + 'file_ids', 'expires_after', + 'chunking_strategy', 'metadata' ]; @@ -49,9 +57,10 @@ class CreateVectorStoreRequest with _$CreateVectorStoreRequest { /// Map representation of object (not serialized) Map toMap() { return { - 'file_ids': fileIds, 'name': name, + 'file_ids': fileIds, 'expires_after': expiresAfter, + 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart new file mode 100644 index 00000000..6dfc6218 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranker.dart @@ -0,0 +1,17 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: FileSearchRanker +// ========================================== + +/// The ranker to use for the file search. If not specified will use the `auto` ranker. 
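// A minimal sketch of the new file-search ranking options; everything used
// here (the `FileSearchRanker` enum, the `ranker` and `scoreThreshold` fields
// and `validateSchema()`) appears in this part of the changeset.
import 'package:openai_dart/openai_dart.dart';

void rankingOptionsSketch() {
  const options = FileSearchRankingOptions(
    ranker: FileSearchRanker.default20240821,
    // Must be a floating point number between 0 and 1.
    scoreThreshold: 0.5,
  );
  // Returns null when all bounds hold, otherwise a description of the problem.
  print(options.validateSchema() ?? 'ranking options are valid');
}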
+enum FileSearchRanker { + @JsonValue('auto') + auto, + @JsonValue('default_2024_08_21') + default20240821, +} diff --git a/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart new file mode 100644 index 00000000..03533c56 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/file_search_ranking_options.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FileSearchRankingOptions +// ========================================== + +/// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and +/// a score_threshold of 0. +/// +/// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) +/// for more information. +@freezed +class FileSearchRankingOptions with _$FileSearchRankingOptions { + const FileSearchRankingOptions._(); + + /// Factory constructor for FileSearchRankingOptions + const factory FileSearchRankingOptions({ + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue, + ) + FileSearchRanker? ranker, + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold') required double scoreThreshold, + }) = _FileSearchRankingOptions; + + /// Object construction from a JSON representation + factory FileSearchRankingOptions.fromJson(Map json) => + _$FileSearchRankingOptionsFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranker', 'score_threshold']; + + /// Validation constants + static const scoreThresholdMinValue = 0.0; + static const scoreThresholdMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (scoreThreshold < scoreThresholdMinValue) { + return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; + } + if (scoreThreshold > scoreThresholdMaxValue) { + return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranker': ranker, + 'score_threshold': scoreThreshold, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart index 51d89b60..409aa1d7 100644 --- a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart @@ -15,8 +15,10 @@ class FineTuningJobHyperparameters with _$FineTuningJobHyperparameters { /// Factory constructor for FineTuningJobHyperparameters const factory FineTuningJobHyperparameters({ - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. 
If setting the number + /// manually, we support any number between 1 and 50 epochs. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') required FineTuningNEpochs nEpochs, @@ -56,8 +58,10 @@ enum FineTuningNEpochsOptions { // CLASS: FineTuningNEpochs // ========================================== -/// The number of epochs to train the model for. An epoch refers to one -/// full cycle through the training dataset. +/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. +/// +/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number +/// manually, we support any number between 1 and 50 epochs. @freezed sealed class FineTuningNEpochs with _$FineTuningNEpochs { const FineTuningNEpochs._(); diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 8049253e..ac87dc02 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -15,16 +15,23 @@ class FunctionObject with _$FunctionObject { /// Factory constructor for FunctionObject const factory FunctionObject({ - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. required String name, /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? description, - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) FunctionParameters? parameters, + + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). + @JsonKey(includeIfNull: false) @Default(false) bool? 
strict, }) = _FunctionObject; /// Object construction from a JSON representation @@ -35,7 +42,8 @@ class FunctionObject with _$FunctionObject { static const List propertyNames = [ 'name', 'description', - 'parameters' + 'parameters', + 'strict' ]; /// Perform validations on the schema property values @@ -49,6 +57,7 @@ class FunctionObject with _$FunctionObject { 'name': name, 'description': description, 'parameters': parameters, + 'strict': strict, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart index abd11036..2429f8ba 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart @@ -8,7 +8,7 @@ part of open_a_i_schema; // TYPE: FunctionParameters // ========================================== -/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. +/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. typedef FunctionParameters = Map; diff --git a/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart new file mode 100644 index 00000000..32f20701 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/json_schema_object.dart @@ -0,0 +1,62 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: JsonSchemaObject +// ========================================== + +/// A JSON Schema object. +@freezed +class JsonSchemaObject with _$JsonSchemaObject { + const JsonSchemaObject._(); + + /// Factory constructor for JsonSchemaObject + const factory JsonSchemaObject({ + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + required String name, + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @JsonKey(includeIfNull: false) String? description, + + /// The schema for the response format, described as a JSON Schema object. + required Map schema, + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @JsonKey(includeIfNull: false) @Default(false) bool? 
strict, + }) = _JsonSchemaObject; + + /// Object construction from a JSON representation + factory JsonSchemaObject.fromJson(Map json) => + _$JsonSchemaObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'name', + 'description', + 'schema', + 'strict' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'name': name, + 'description': description, + 'schema': schema, + 'strict': strict, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_content.dart b/packages/openai_dart/lib/src/generated/schema/message_content.dart index 14e23e22..46783eae 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content.dart @@ -52,6 +52,19 @@ sealed class MessageContent with _$MessageContent { required MessageContentText text, }) = MessageContentTextObject; + // ------------------------------------------ + // UNION: MessageContentRefusalObject + // ------------------------------------------ + + /// The refusal content generated by the assistant. + const factory MessageContent.refusal({ + /// Always `refusal`. + required String type, + + /// No Description + required String refusal, + }) = MessageContentRefusalObject; + /// Object construction from a JSON representation factory MessageContent.fromJson(Map json) => _$MessageContentFromJson(json); @@ -68,4 +81,6 @@ enum MessageContentEnumType { imageUrl, @JsonValue('text') text, + @JsonValue('refusal') + refusal, } diff --git a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart index 5317431b..c8d3a8f1 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_content_text_annotations_file_citation.dart @@ -18,9 +18,6 @@ class MessageContentTextAnnotationsFileCitation const factory MessageContentTextAnnotationsFileCitation({ /// The ID of the specific File the citation is from. @JsonKey(name: 'file_id') required String fileId, - - /// The specific quote in the file. - required String quote, }) = _MessageContentTextAnnotationsFileCitation; /// Object construction from a JSON representation @@ -29,7 +26,7 @@ class MessageContentTextAnnotationsFileCitation _$MessageContentTextAnnotationsFileCitationFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_id', 'quote']; + static const List propertyNames = ['file_id']; /// Perform validations on the schema property values String? validateSchema() { @@ -40,7 +37,6 @@ class MessageContentTextAnnotationsFileCitation Map toMap() { return { 'file_id': fileId, - 'quote': quote, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart index f53291ee..738ab400 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_delta_content.dart @@ -46,7 +46,49 @@ sealed class MessageDeltaContent with _$MessageDeltaContent { @JsonKey(includeIfNull: false) MessageDeltaContentText? 
text, }) = MessageDeltaContentTextObject; + // ------------------------------------------ + // UNION: MessageDeltaContentRefusalObject + // ------------------------------------------ + + /// The refusal content that is part of a message. + const factory MessageDeltaContent.refusal({ + /// The index of the refusal part in the message. + required int index, + + /// Always `refusal`. + required String type, + + /// The refusal content generated by the assistant. + @JsonKey(includeIfNull: false) String? refusal, + }) = MessageDeltaContentRefusalObject; + + // ------------------------------------------ + // UNION: MessageDeltaContentImageUrlObject + // ------------------------------------------ + + /// References an image URL in the content of a message. + const factory MessageDeltaContent.imageUrl({ + /// The index of the content part in the message. + required int index, + + /// Always `image_url`. + required String type, + + /// The image URL part of a message. + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl, + }) = MessageDeltaContentImageUrlObject; + /// Object construction from a JSON representation factory MessageDeltaContent.fromJson(Map json) => _$MessageDeltaContentFromJson(json); } + +// ========================================== +// ENUM: MessageDeltaContentEnumType +// ========================================== + +enum MessageDeltaContentEnumType { + @JsonValue('refusal') + refusal, +} diff --git a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart b/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart deleted file mode 100644 index 1008bbb0..00000000 --- a/packages/openai_dart/lib/src/generated/schema/message_delta_content_image_url_object.dart +++ /dev/null @@ -1,51 +0,0 @@ -// coverage:ignore-file -// GENERATED CODE - DO NOT MODIFY BY HAND -// ignore_for_file: type=lint -// ignore_for_file: invalid_annotation_target -part of open_a_i_schema; - -// ========================================== -// CLASS: MessageDeltaContentImageUrlObject -// ========================================== - -/// References an image URL in the content of a message. -@freezed -class MessageDeltaContentImageUrlObject - with _$MessageDeltaContentImageUrlObject { - const MessageDeltaContentImageUrlObject._(); - - /// Factory constructor for MessageDeltaContentImageUrlObject - const factory MessageDeltaContentImageUrlObject({ - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) int? index, - - /// Always `image_url`. - @JsonKey(includeIfNull: false) String? type, - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl, - }) = _MessageDeltaContentImageUrlObject; - - /// Object construction from a JSON representation - factory MessageDeltaContentImageUrlObject.fromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectFromJson(json); - - /// List of all property names of schema - static const List propertyNames = ['index', 'type', 'image_url']; - - /// Perform validations on the schema property values - String? 
validateSchema() { - return null; - } - - /// Map representation of object (not serialized) - Map toMap() { - return { - 'index': index, - 'type': type, - 'image_url': imageUrl, - }; - } -} diff --git a/packages/openai_dart/lib/src/generated/schema/message_object.dart b/packages/openai_dart/lib/src/generated/schema/message_object.dart index fae9d2ae..9e991a27 100644 --- a/packages/openai_dart/lib/src/generated/schema/message_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/message_object.dart @@ -58,7 +58,9 @@ class MessageObject with _$MessageObject { /// A list of files attached to the message, and the tools they were added to. required List? attachments, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, }) = _MessageObject; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index b02d123e..5bd7ad65 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -27,7 +27,8 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// The system instructions that the assistant uses. The maximum length is 256,000 characters. @JsonKey(includeIfNull: false) String? instructions, - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @Default([]) List tools, /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -37,22 +38,39 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? responseFormat, @@ -140,8 +158,6 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// `auto` is the default value enum ModifyAssistantResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -150,11 +166,23 @@ enum ModifyAssistantResponseFormatMode { // CLASS: ModifyAssistantRequestResponseFormat // ========================================== -/// Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@freezed sealed class ModifyAssistantRequestResponseFormat with _$ModifyAssistantRequestResponseFormat { @@ -166,9 +194,9 @@ sealed class ModifyAssistantRequestResponseFormat ) = ModifyAssistantRequestResponseFormatEnumeration; /// No Description - const factory ModifyAssistantRequestResponseFormat.format( - AssistantsResponseFormat value, - ) = ModifyAssistantRequestResponseFormatAssistantsResponseFormat; + const factory ModifyAssistantRequestResponseFormat.responseFormat( + ResponseFormat value, + ) = ModifyAssistantRequestResponseFormatResponseFormat; /// Object construction from a JSON representation factory ModifyAssistantRequestResponseFormat.fromJson( @@ -198,8 +226,8 @@ class _ModifyAssistantRequestResponseFormatConverter } if (data is Map) { try { - return ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return ModifyAssistantRequestResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -213,9 +241,7 @@ class _ModifyAssistantRequestResponseFormatConverter return switch (data) { ModifyAssistantRequestResponseFormatEnumeration(value: final v) => _$ModifyAssistantResponseFormatModeEnumMap[v]!, - ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - value: final v - ) => + ModifyAssistantRequestResponseFormatResponseFormat(value: final v) => v.toJson(), null => null, }; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart index b6e7d119..b7ec05e1 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_message_request.dart @@ -15,7 +15,9 @@ class ModifyMessageRequest with _$ModifyMessageRequest { /// Factory constructor for ModifyMessageRequest const factory ModifyMessageRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyMessageRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart index 3d113815..973a0b3d 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_run_request.dart @@ -15,7 +15,9 @@ class ModifyRunRequest with _$ModifyRunRequest { /// Factory constructor for ModifyRunRequest const factory ModifyRunRequest({ - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
@JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyRunRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart index a335f1b6..96f4983f 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_thread_request.dart @@ -19,7 +19,9 @@ class ModifyThreadRequest with _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, }) = _ModifyThreadRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/response_format.dart b/packages/openai_dart/lib/src/generated/schema/response_format.dart new file mode 100644 index 00000000..7b975680 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/response_format.dart @@ -0,0 +1,82 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ResponseFormat +// ========================================== + +/// An object specifying the format that the model must output. Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer +/// than `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model +/// will match your supplied JSON schema. +/// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +/// +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is +/// valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system +/// or user message. Without this, the model may generate an unending stream of whitespace until the generation +/// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message +/// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded +/// `max_tokens` or the conversation exceeded the max context length. +@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake) +sealed class ResponseFormat with _$ResponseFormat { + const ResponseFormat._(); + + // ------------------------------------------ + // UNION: ResponseFormatText + // ------------------------------------------ + + /// The model should respond with plain text. + const factory ResponseFormat.text({ + /// The type of response format being defined. 
+ @Default(ResponseFormatType.text) ResponseFormatType type, + }) = ResponseFormatText; + + // ------------------------------------------ + // UNION: ResponseFormatJsonObject + // ------------------------------------------ + + /// The model should respond with a JSON object. + const factory ResponseFormat.jsonObject({ + /// The type of response format being defined. + @Default(ResponseFormatType.jsonObject) ResponseFormatType type, + }) = ResponseFormatJsonObject; + + // ------------------------------------------ + // UNION: ResponseFormatJsonSchema + // ------------------------------------------ + + /// The model should respond with a JSON object that adheres to the specified schema. + const factory ResponseFormat.jsonSchema({ + /// The type of response format being defined. + @Default(ResponseFormatType.jsonSchema) ResponseFormatType type, + + /// A JSON Schema object. + @JsonKey(name: 'json_schema') required JsonSchemaObject jsonSchema, + }) = ResponseFormatJsonSchema; + + /// Object construction from a JSON representation + factory ResponseFormat.fromJson(Map json) => + _$ResponseFormatFromJson(json); +} + +// ========================================== +// ENUM: ResponseFormatEnumType +// ========================================== + +enum ResponseFormatEnumType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, + @JsonValue('json_schema') + jsonSchema, +} diff --git a/packages/openai_dart/lib/src/generated/schema/response_format_type.dart b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart new file mode 100644 index 00000000..da215209 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/response_format_type.dart @@ -0,0 +1,19 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ResponseFormatType +// ========================================== + +/// The type of response format being defined. +enum ResponseFormatType { + @JsonValue('text') + text, + @JsonValue('json_object') + jsonObject, + @JsonValue('json_schema') + jsonSchema, +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index e34403a8..98fd5f0c 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -68,7 +68,9 @@ class RunObject with _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. required List tools, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -99,11 +101,27 @@ class RunObject with _$RunObject { @JsonKey(name: 'tool_choice') required RunObjectToolChoice? 
toolChoice, - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') required bool? parallelToolCalls, + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required RunObjectResponseFormat responseFormat, @@ -140,6 +158,7 @@ class RunObject with _$RunObject { 'max_completion_tokens', 'truncation_strategy', 'tool_choice', + 'parallel_tool_calls', 'response_format' ]; @@ -187,6 +206,7 @@ class RunObject with _$RunObject { 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, 'tool_choice': toolChoice, + 'parallel_tool_calls': parallelToolCalls, 'response_format': responseFormat, }; } @@ -432,8 +452,6 @@ class _RunObjectToolChoiceConverter /// `auto` is the default value enum RunObjectResponseFormatMode { - @JsonValue('none') - none, @JsonValue('auto') auto, } @@ -442,11 +460,23 @@ enum RunObjectResponseFormatMode { // CLASS: RunObjectResponseFormat // ========================================== -/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. +/// Specifies the format that the model must output. 
Compatible with +/// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +/// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models +/// since `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures +/// the model will match your supplied JSON schema. Learn more in the +/// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// -/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates +/// is valid JSON. /// -/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a +/// system or user message. Without this, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note +/// that the message content may be partially cut off if `finish_reason="length"`, which indicates the +/// generation exceeded `max_tokens` or the conversation exceeded the max context length. @freezed sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { const RunObjectResponseFormat._(); @@ -457,9 +487,9 @@ sealed class RunObjectResponseFormat with _$RunObjectResponseFormat { ) = RunObjectResponseFormatEnumeration; /// No Description - const factory RunObjectResponseFormat.format( - AssistantsResponseFormat value, - ) = RunObjectResponseFormatAssistantsResponseFormat; + const factory RunObjectResponseFormat.responseFormat( + ResponseFormat value, + ) = RunObjectResponseFormatResponseFormat; /// Object construction from a JSON representation factory RunObjectResponseFormat.fromJson(Map json) => @@ -483,8 +513,8 @@ class _RunObjectResponseFormatConverter } if (data is Map) { try { - return RunObjectResponseFormatAssistantsResponseFormat( - AssistantsResponseFormat.fromJson(data), + return RunObjectResponseFormatResponseFormat( + ResponseFormat.fromJson(data), ); } catch (e) {} } @@ -498,8 +528,7 @@ class _RunObjectResponseFormatConverter return switch (data) { RunObjectResponseFormatEnumeration(value: final v) => _$RunObjectResponseFormatModeEnumMap[v]!, - RunObjectResponseFormatAssistantsResponseFormat(value: final v) => - v.toJson(), + RunObjectResponseFormatResponseFormat(value: final v) => v.toJson(), }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart index c4605b7b..327de9f5 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls.dart @@ -42,8 +42,9 @@ sealed class RunStepDetailsToolCalls with _$RunStepDetailsToolCalls { /// The type of tool call. 
This is always going to be `file_search` for this type of tool call. required String type, - /// For now, this is always going to be an empty object. - @JsonKey(name: 'file_search') required Map fileSearch, + /// The definition of the file search that was called. + @JsonKey(name: 'file_search') + required RunStepDetailsToolCallsFileSearch fileSearch, }) = RunStepDetailsToolCallsFileSearchObject; // ------------------------------------------ diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart new file mode 100644 index 00000000..16f72322 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search.dart @@ -0,0 +1,48 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearch +// ========================================== + +/// The definition of the file search that was called. +@freezed +class RunStepDetailsToolCallsFileSearch + with _$RunStepDetailsToolCallsFileSearch { + const RunStepDetailsToolCallsFileSearch._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearch + const factory RunStepDetailsToolCallsFileSearch({ + /// The ranking options for the file search. + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + + /// The results of the file search. + @JsonKey(includeIfNull: false) + List? results, + }) = _RunStepDetailsToolCallsFileSearch; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearch.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranking_options', 'results']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranking_options': rankingOptions, + 'results': results, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart new file mode 100644 index 00000000..61b2ff06 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_ranking_options_object.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchRankingOptionsObject +// ========================================== + +/// The ranking options for the file search. +@freezed +class RunStepDetailsToolCallsFileSearchRankingOptionsObject + with _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const RunStepDetailsToolCallsFileSearchRankingOptionsObject._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchRankingOptionsObject + const factory RunStepDetailsToolCallsFileSearchRankingOptionsObject({ + /// The ranker to use for the file search. 
If not specified will use the `auto` ranker. + required FileSearchRanker ranker, + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold') required double scoreThreshold, + }) = _RunStepDetailsToolCallsFileSearchRankingOptionsObject; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['ranker', 'score_threshold']; + + /// Validation constants + static const scoreThresholdMinValue = 0.0; + static const scoreThresholdMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (scoreThreshold < scoreThresholdMinValue) { + return "The value of 'scoreThreshold' cannot be < $scoreThresholdMinValue"; + } + if (scoreThreshold > scoreThresholdMaxValue) { + return "The value of 'scoreThreshold' cannot be > $scoreThresholdMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'ranker': ranker, + 'score_threshold': scoreThreshold, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart new file mode 100644 index 00000000..3ba23a07 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_content.dart @@ -0,0 +1,46 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchResultContent +// ========================================== + +/// The content of the result that was found. +@freezed +class RunStepDetailsToolCallsFileSearchResultContent + with _$RunStepDetailsToolCallsFileSearchResultContent { + const RunStepDetailsToolCallsFileSearchResultContent._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchResultContent + const factory RunStepDetailsToolCallsFileSearchResultContent({ + /// The type of the content. + @Default('text') String type, + + /// The text content of the file. + @JsonKey(includeIfNull: false) String? text, + }) = _RunStepDetailsToolCallsFileSearchResultContent; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchResultContent.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultContentFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['type', 'text']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'type': type, + 'text': text, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart new file mode 100644 index 00000000..4b1a1de0 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/run_step_details_tool_calls_file_search_result_object.dart @@ -0,0 +1,71 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: RunStepDetailsToolCallsFileSearchResultObject +// ========================================== + +/// A result instance of the file search. +@freezed +class RunStepDetailsToolCallsFileSearchResultObject + with _$RunStepDetailsToolCallsFileSearchResultObject { + const RunStepDetailsToolCallsFileSearchResultObject._(); + + /// Factory constructor for RunStepDetailsToolCallsFileSearchResultObject + const factory RunStepDetailsToolCallsFileSearchResultObject({ + /// The ID of the file that result was found in. + @JsonKey(name: 'file_id') required String fileId, + + /// The name of the file that result was found in. + @JsonKey(name: 'file_name') required String fileName, + + /// The score of the result. All values must be a floating point number between 0 and 1. + required double score, + + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @JsonKey(includeIfNull: false) + List? content, + }) = _RunStepDetailsToolCallsFileSearchResultObject; + + /// Object construction from a JSON representation + factory RunStepDetailsToolCallsFileSearchResultObject.fromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultObjectFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'file_id', + 'file_name', + 'score', + 'content' + ]; + + /// Validation constants + static const scoreMinValue = 0.0; + static const scoreMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (score < scoreMinValue) { + return "The value of 'score' cannot be < $scoreMinValue"; + } + if (score > scoreMaxValue) { + return "The value of 'score' cannot be > $scoreMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'file_id': fileId, + 'file_name': fileName, + 'score': score, + 'content': content, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart index 2e56839e..ede505da 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_step_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_step_object.dart @@ -58,7 +58,9 @@ class RunStepObject with _$RunStepObject { /// The Unix timestamp (in seconds) for when the run step completed. @JsonKey(name: 'completed_at') required int? completedAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index 6d9b2613..265649d4 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -22,6 +22,8 @@ part 'chat_completion_message_function_call.dart'; part 'chat_completion_function_call_option.dart'; part 'function_object.dart'; part 'function_parameters.dart'; +part 'response_format_type.dart'; +part 'json_schema_object.dart'; part 'chat_completion_tool.dart'; part 'chat_completion_named_tool_choice.dart'; part 'chat_completion_message_tool_calls.dart'; @@ -30,6 +32,7 @@ part 'chat_completion_stream_options.dart'; part 'create_chat_completion_response.dart'; part 'chat_completion_response_choice.dart'; part 'chat_completion_finish_reason.dart'; +part 'service_tier.dart'; part 'chat_completion_logprobs.dart'; part 'chat_completion_token_logprob.dart'; part 'chat_completion_token_top_logprob.dart'; @@ -39,6 +42,7 @@ part 'chat_completion_stream_response_delta.dart'; part 'chat_completion_stream_message_function_call.dart'; part 'chat_completion_stream_message_tool_call_chunk.dart'; part 'completion_usage.dart'; +part 'completion_tokens_details.dart'; part 'create_embedding_request.dart'; part 'create_embedding_response.dart'; part 'embedding.dart'; @@ -70,9 +74,10 @@ part 'create_assistant_request.dart'; part 'modify_assistant_request.dart'; part 'delete_assistant_response.dart'; part 'list_assistants_response.dart'; +part 'file_search_ranking_options.dart'; +part 'file_search_ranker.dart'; part 'assistants_named_tool_choice.dart'; part 'assistants_function_call_option.dart'; -part 'assistants_response_format.dart'; part 'truncation_object.dart'; part 'run_object.dart'; part 'run_completion_usage.dart'; @@ -105,7 +110,6 @@ part 'message_content_image_detail.dart'; part 'message_request_content_text_object.dart'; part 'message_content_text.dart'; part 'message_content_text_annotations_file_citation.dart'; -part 'message_delta_content_image_url_object.dart'; part 'message_delta_content_text.dart'; part 'message_delta_content_text_annotations_file_citation.dart'; part 'run_step_object.dart'; @@ -118,6 +122,10 @@ part 'run_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_delta_step_details_tool_calls_code_object_code_interpreter.dart'; part 'run_step_details_tool_calls_code_output_image.dart'; part 'run_step_delta_step_details_tool_calls_code_output_image.dart'; +part 'run_step_details_tool_calls_file_search.dart'; +part 'run_step_details_tool_calls_file_search_ranking_options_object.dart'; +part 'run_step_details_tool_calls_file_search_result_object.dart'; +part 'run_step_details_tool_calls_file_search_result_content.dart'; part 'run_step_completion_usage.dart'; part 'vector_store_expiration_after.dart'; part 'vector_store_object.dart'; @@ -126,6 +134,7 @@ part 'update_vector_store_request.dart'; part 'list_vector_stores_response.dart'; part 'delete_vector_store_response.dart'; part 'vector_store_file_object.dart'; +part 'static_chunking_strategy.dart'; part 
'create_vector_store_file_request.dart'; part 'list_vector_store_files_response.dart'; part 'delete_vector_store_file_response.dart'; @@ -140,6 +149,7 @@ part 'batch.dart'; part 'list_batches_response.dart'; part 'chat_completion_message.dart'; part 'chat_completion_message_content_part.dart'; +part 'response_format.dart'; part 'assistant_tools.dart'; part 'message_content.dart'; part 'message_delta_content.dart'; @@ -151,4 +161,6 @@ part 'run_step_details_tool_calls.dart'; part 'run_step_delta_step_details_tool_calls.dart'; part 'run_step_details_tool_calls_code_output.dart'; part 'run_step_delta_step_details_tool_calls_code_output.dart'; +part 'chunking_strategy_request_param.dart'; +part 'chunking_strategy_response_param.dart'; part 'assistant_stream_event.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 472cae5b..af25caaf 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -123,8 +123,12 @@ mixin _$CreateCompletionRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -178,6 +182,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -276,6 +282,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionModelCopyWith<$Res> get model { @@ -284,6 +292,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionPromptCopyWith<$Res>? get prompt { @@ -296,6 +306,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionStopCopyWith<$Res>? get stop { @@ -308,6 +320,8 @@ class _$CreateCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -377,6 +391,8 @@ class __$$CreateCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -676,7 +692,7 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -699,7 +715,9 @@ class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { topP, user); - @JsonKey(ignore: true) + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> @@ -746,127 +764,129 @@ abstract class _CreateCompletionRequest extends CreateCompletionRequest { factory _CreateCompletionRequest.fromJson(Map json) = _$CreateCompletionRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_CompletionModelConverter() CompletionModel get model; - @override /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. /// /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @override @_CompletionPromptConverter() CompletionPrompt? get prompt; - @override /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. /// /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. /// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override @JsonKey(name: 'best_of', includeIfNull: false) int? get bestOf; - @override /// Echo back the prompt in addition to the completion + @override @JsonKey(includeIfNull: false) bool? get echo; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; - @override /// Modify the likelihood of specified tokens appearing in the completion. /// /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
/// /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - @override /// Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. /// /// The maximum value for `logprobs` is 5. + @override @JsonKey(includeIfNull: false) int? get logprobs; - @override /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion. /// /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; - @override /// How many completions to generate for each prompt. /// /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - @override /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. /// /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + @override @JsonKey(includeIfNull: false) int? get seed; - @override /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @override @_CompletionStopConverter() @JsonKey(includeIfNull: false) CompletionStop? get stop; - @override /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override @JsonKey(includeIfNull: false) bool? get stream; - @override /// Options for streaming response. Only set this when you set `stream: true`. + @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; - @override /// The suffix that comes after a completion of inserted text. /// /// This parameter is only supported for `gpt-3.5-turbo-instruct`. + @override @JsonKey(includeIfNull: false) String? get suffix; - @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
/// /// We generally recommend altering this or `top_p` but not both. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -925,6 +945,8 @@ mixin _$CompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -944,6 +966,9 @@ class _$CompletionModelCopyWithImpl<$Res, $Val extends CompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -966,6 +991,8 @@ class __$$CompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CompletionModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1010,11 +1037,13 @@ class _$CompletionModelEnumerationImpl extends CompletionModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> @@ -1101,7 +1130,10 @@ abstract class CompletionModelEnumeration extends CompletionModel { @override CompletionModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionModelEnumerationImplCopyWith<_$CompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1124,6 +1156,8 @@ class __$$CompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$CompletionModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. 
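// --- Illustrative usage sketch (not part of the generated file) ---
// The CreateCompletionRequest fields documented above are typically combined
// like this. The factory names used here (CompletionModel.modelId,
// CompletionPrompt.string, CompletionStop.listString) are assumptions based on
// the union variants declared in this schema; verify them against the
// published openai_dart API before relying on them.
import 'package:openai_dart/openai_dart.dart';

Future<void> runLegacyCompletion(OpenAIClient client) async {
  final request = CreateCompletionRequest(
    model: CompletionModel.modelId('gpt-3.5-turbo-instruct'),
    prompt: CompletionPrompt.string('Say this is a test.'),
    maxTokens: 16,
    temperature: 0, // prefer tuning either temperature or topP, not both
    stop: CompletionStop.listString(['\n']),
  );
  final res = await client.createCompletion(request: request);
  print(res.choices.first.text);
}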
@pragma('vm:prefer-inline') @override $Res call({ @@ -1167,11 +1201,13 @@ class _$CompletionModelStringImpl extends CompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> @@ -1258,7 +1294,10 @@ abstract class CompletionModelString extends CompletionModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionModelStringImplCopyWith<_$CompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1333,6 +1372,8 @@ mixin _$CompletionPrompt { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionPrompt to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -1352,6 +1393,9 @@ class _$CompletionPromptCopyWithImpl<$Res, $Val extends CompletionPrompt> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -1374,6 +1418,8 @@ class __$$CompletionPromptListListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListListIntImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1425,12 +1471,14 @@ class _$CompletionPromptListListIntImpl extends CompletionPromptListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> @@ -1529,7 +1577,10 @@ abstract class CompletionPromptListListInt extends CompletionPrompt { @override List> get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListListIntImplCopyWith<_$CompletionPromptListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1553,6 +1604,8 @@ class __$$CompletionPromptListIntImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListIntImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -1603,12 +1656,14 @@ class _$CompletionPromptListIntImpl extends CompletionPromptListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> @@ -1707,7 +1762,10 @@ abstract class CompletionPromptListInt extends CompletionPrompt { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListIntImplCopyWith<_$CompletionPromptListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1732,6 +1790,8 @@ class __$$CompletionPromptListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptListStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1783,12 +1843,14 @@ class _$CompletionPromptListStringImpl extends CompletionPromptListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> @@ -1887,7 +1949,10 @@ abstract class CompletionPromptListString extends CompletionPrompt { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptListStringImplCopyWith<_$CompletionPromptListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1911,6 +1976,8 @@ class __$$CompletionPromptStringImplCopyWithImpl<$Res> $Res Function(_$CompletionPromptStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1954,11 +2021,13 @@ class _$CompletionPromptStringImpl extends CompletionPromptString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. 
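// --- Illustrative usage sketch (not part of the generated file) ---
// The CompletionPrompt union above accepts four encodings of the prompt. The
// factory names (.string, .listString, .listInt, .listListInt) are assumed to
// mirror the CompletionPromptString / ListString / ListInt / ListListInt
// variants in this schema; verify against the published openai_dart API.
import 'package:openai_dart/openai_dart.dart';

final promptAsText = CompletionPrompt.string('Once upon a time');
final promptBatch = CompletionPrompt.listString(['Prompt A', 'Prompt B']);
final promptAsTokens = CompletionPrompt.listInt([9038, 2402, 257, 640]);
final promptTokenBatch = CompletionPrompt.listListInt([
  [9038, 2402, 257, 640],
  [1212, 318, 257, 1332],
]);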
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> @@ -2057,7 +2126,10 @@ abstract class CompletionPromptString extends CompletionPrompt { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionPrompt + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionPromptStringImplCopyWith<_$CompletionPromptStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2116,6 +2188,8 @@ mixin _$CompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -2135,6 +2209,9 @@ class _$CompletionStopCopyWithImpl<$Res, $Val extends CompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -2156,6 +2233,8 @@ class __$$CompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopListStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2206,12 +2285,14 @@ class _$CompletionStopListStringImpl extends CompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> @@ -2298,7 +2379,10 @@ abstract class CompletionStopListString extends CompletionStop { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionStopListStringImplCopyWith<_$CompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2320,6 +2404,8 @@ class __$$CompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$CompletionStopStringImpl) _then) : super(_value, _then); + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2363,11 +2449,13 @@ class _$CompletionStopStringImpl extends CompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> @@ -2455,7 +2543,10 @@ abstract class CompletionStopString extends CompletionStop { @override String? 
get value; - @JsonKey(ignore: true) + + /// Create a copy of CompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionStopStringImplCopyWith<_$CompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2493,8 +2584,12 @@ mixin _$CreateCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2529,6 +2624,8 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2572,6 +2669,8 @@ class _$CreateCompletionResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -2618,6 +2717,8 @@ class __$$CreateCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateCompletionResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2739,7 +2840,7 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -2751,7 +2852,9 @@ class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> @@ -2782,40 +2885,42 @@ abstract class _CreateCompletionResponse extends CreateCompletionResponse { factory _CreateCompletionResponse.fromJson(Map json) = _$CreateCompletionResponseImpl.fromJson; - @override - /// A unique identifier for the completion. - String get id; @override + String get id; /// The list of completion choices the model generated for the input prompt. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the completion was created. - int get created; @override + int get created; /// The model used for completion. - String get model; @override + String get model; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? 
get systemFingerprint; - @override /// The object type, which is always "text_completion" - CreateCompletionResponseObject get object; @override + CreateCompletionResponseObject get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2844,8 +2949,12 @@ mixin _$CompletionChoice { /// The text of the completion. String get text => throw _privateConstructorUsedError; + /// Serializes this CompletionChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2878,6 +2987,8 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2906,6 +3017,8 @@ class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> ) as $Val); } + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionLogprobsCopyWith<$Res>? get logprobs { @@ -2948,6 +3061,8 @@ class __$$CompletionChoiceImplCopyWithImpl<$Res> $Res Function(_$CompletionChoiceImpl) _then) : super(_value, _then); + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3032,12 +3147,14 @@ class _$CompletionChoiceImpl extends _CompletionChoice { (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, finishReason, index, logprobs, text); - @JsonKey(ignore: true) + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => @@ -3066,29 +3183,31 @@ abstract class _CompletionChoice extends CompletionChoice { factory _CompletionChoice.fromJson(Map json) = _$CompletionChoiceImpl.fromJson; - @override - /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// or `content_filter` if content was omitted due to a flag from our content filters. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) CompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of generated choices. - int get index; @override + int get index; /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. 
For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - CompletionLogprobs? get logprobs; @override + CompletionLogprobs? get logprobs; /// The text of the completion. + @override String get text; + + /// Create a copy of CompletionChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3116,8 +3235,12 @@ mixin _$CompletionLogprobs { List?>? get topLogprobs => throw _privateConstructorUsedError; + /// Serializes this CompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3148,6 +3271,8 @@ class _$CompletionLogprobsCopyWithImpl<$Res, $Val extends CompletionLogprobs> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3203,6 +3328,8 @@ class __$$CompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$CompletionLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3327,7 +3454,7 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -3336,7 +3463,9 @@ class _$CompletionLogprobsImpl extends _CompletionLogprobs { const DeepCollectionEquality().hash(_tokens), const DeepCollectionEquality().hash(_topLogprobs)); - @JsonKey(ignore: true) + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => @@ -3366,28 +3495,30 @@ abstract class _CompletionLogprobs extends CompletionLogprobs { factory _CompletionLogprobs.fromJson(Map json) = _$CompletionLogprobsImpl.fromJson; - @override - /// The offset of the token from the beginning of the prompt. + @override @JsonKey(name: 'text_offset', includeIfNull: false) List? get textOffset; - @override /// The log probabilities of tokens in the completion. + @override @JsonKey(name: 'token_logprobs', includeIfNull: false) List? get tokenLogprobs; - @override /// The tokens generated by the model converted back to text. + @override @JsonKey(includeIfNull: false) List? get tokens; - @override /// The log probabilities of the `logprobs` most likely tokens. + @override @JsonKey(name: 'top_logprobs', includeIfNull: false) List?>? get topLogprobs; + + /// Create a copy of CompletionLogprobs + /// with the given fields replaced by the non-null parameter values. 
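// --- Illustrative usage sketch (not part of the generated file) ---
// Reading the CreateCompletionResponse / CompletionChoice / CompletionLogprobs
// fields documented above. The getters used here (choices, index, text,
// finishReason, logprobs, tokens, tokenLogprobs, usage) are the ones declared
// in this schema.
import 'package:openai_dart/openai_dart.dart';

void inspectCompletion(CreateCompletionResponse res) {
  for (final choice in res.choices) {
    print('[${choice.index}] ${choice.text} (finish: ${choice.finishReason})');
    final logprobs = choice.logprobs; // only populated when requested
    if (logprobs != null) {
      print('tokens: ${logprobs.tokens}');
      print('token logprobs: ${logprobs.tokenLogprobs}');
    }
  }
  print('usage: ${res.usage}');
}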
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3399,11 +3530,13 @@ CreateChatCompletionRequest _$CreateChatCompletionRequestFromJson( /// @nodoc mixin _$CreateChatCompletionRequest { - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @_ChatCompletionModelConverter() ChatCompletionModel get model => throw _privateConstructorUsedError; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). List get messages => throw _privateConstructorUsedError; @@ -3415,25 +3548,40 @@ mixin _$CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias => throw _privateConstructorUsedError; - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @JsonKey(includeIfNull: false) bool? get logprobs => throw _privateConstructorUsedError; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs => throw _privateConstructorUsedError; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. 
+ /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens => throw _privateConstructorUsedError; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? get maxCompletionTokens => throw _privateConstructorUsedError; + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @JsonKey(includeIfNull: false) int? get n => throw _privateConstructorUsedError; @@ -3443,27 +3591,60 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty => throw _privateConstructorUsedError; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. 
Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? get responseFormat => - throw _privateConstructorUsedError; + ResponseFormat? get responseFormat => throw _privateConstructorUsedError; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? get serviceTier => + throw _privateConstructorUsedError; + /// Up to 4 sequences where the API will stop generating further tokens. @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? get stop => throw _privateConstructorUsedError; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @JsonKey(includeIfNull: false) bool? 
get stream => throw _privateConstructorUsedError; @@ -3484,7 +3665,9 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; @@ -3492,7 +3675,8 @@ mixin _$CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @_ChatCompletionToolChoiceOptionConverter() @@ -3500,6 +3684,11 @@ mixin _$CreateChatCompletionRequest { ChatCompletionToolChoiceOption? get toolChoice => throw _privateConstructorUsedError; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; @@ -3509,7 +3698,8 @@ mixin _$CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. @_ChatCompletionFunctionCallConverter() @@ -3523,8 +3713,12 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) List? get functions => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3547,12 +3741,19 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) bool? 
logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3565,6 +3766,8 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3572,7 +3775,7 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) List? functions}); $ChatCompletionModelCopyWith<$Res> get model; - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; + $ResponseFormatCopyWith<$Res>? get responseFormat; $ChatCompletionStopCopyWith<$Res>? get stop; $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions; $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice; @@ -3590,6 +3793,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3600,10 +3805,12 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, + Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, + Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3611,6 +3818,7 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3644,6 +3852,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, + maxCompletionTokens: freezed == maxCompletionTokens + ? _value.maxCompletionTokens + : maxCompletionTokens // ignore: cast_nullable_to_non_nullable + as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -3655,11 +3867,15 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormat?, + as ResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, + serviceTier: freezed == serviceTier + ? 
_value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3688,6 +3904,10 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, user: freezed == user ? _value.user : user // ignore: cast_nullable_to_non_nullable @@ -3703,6 +3923,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionModelCopyWith<$Res> get model { @@ -3711,19 +3933,22 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat { + $ResponseFormatCopyWith<$Res>? get responseFormat { if (_value.responseFormat == null) { return null; } - return $ChatCompletionResponseFormatCopyWith<$Res>(_value.responseFormat!, - (value) { + return $ResponseFormatCopyWith<$Res>(_value.responseFormat!, (value) { return _then(_value.copyWith(responseFormat: value) as $Val); }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStopCopyWith<$Res>? get stop { @@ -3736,6 +3961,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamOptionsCopyWith<$Res>? get streamOptions { @@ -3749,6 +3976,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionToolChoiceOptionCopyWith<$Res>? get toolChoice { @@ -3762,6 +3991,8 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallCopyWith<$Res>? get functionCall { @@ -3795,12 +4026,19 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) bool? logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? maxCompletionTokens, @JsonKey(includeIfNull: false) int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? responseFormat, + ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? 
serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? stop, @@ -3813,6 +4051,8 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @JsonKey(includeIfNull: false) String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -3822,7 +4062,7 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> @override $ChatCompletionModelCopyWith<$Res> get model; @override - $ChatCompletionResponseFormatCopyWith<$Res>? get responseFormat; + $ResponseFormatCopyWith<$Res>? get responseFormat; @override $ChatCompletionStopCopyWith<$Res>? get stop; @override @@ -3843,6 +4083,8 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3853,10 +4095,12 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? logprobs = freezed, Object? topLogprobs = freezed, Object? maxTokens = freezed, + Object? maxCompletionTokens = freezed, Object? n = freezed, Object? presencePenalty = freezed, Object? responseFormat = freezed, Object? seed = freezed, + Object? serviceTier = freezed, Object? stop = freezed, Object? stream = freezed, Object? streamOptions = freezed, @@ -3864,6 +4108,7 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? topP = freezed, Object? tools = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? user = freezed, Object? functionCall = freezed, Object? functions = freezed, @@ -3897,6 +4142,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.maxTokens : maxTokens // ignore: cast_nullable_to_non_nullable as int?, + maxCompletionTokens: freezed == maxCompletionTokens + ? _value.maxCompletionTokens + : maxCompletionTokens // ignore: cast_nullable_to_non_nullable + as int?, n: freezed == n ? _value.n : n // ignore: cast_nullable_to_non_nullable @@ -3908,11 +4157,15 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormat?, + as ResponseFormat?, seed: freezed == seed ? _value.seed : seed // ignore: cast_nullable_to_non_nullable as int?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as CreateChatCompletionRequestServiceTier?, stop: freezed == stop ? _value.stop : stop // ignore: cast_nullable_to_non_nullable @@ -3941,6 +4194,10 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as ChatCompletionToolChoiceOption?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, user: freezed == user ? 
_value.user : user // ignore: cast_nullable_to_non_nullable @@ -3970,12 +4227,19 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) this.logprobs, @JsonKey(name: 'top_logprobs', includeIfNull: false) this.topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) this.maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + this.maxCompletionTokens, @JsonKey(includeIfNull: false) this.n = 1, @JsonKey(name: 'presence_penalty', includeIfNull: false) this.presencePenalty = 0.0, @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @JsonKey(includeIfNull: false) this.seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) this.stop, @JsonKey(includeIfNull: false) this.stream = false, @JsonKey(name: 'stream_options', includeIfNull: false) this.streamOptions, @@ -3985,6 +4249,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls, @JsonKey(includeIfNull: false) this.user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4000,15 +4266,18 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { Map json) => _$$CreateChatCompletionRequestImplFromJson(json); - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @override @_ChatCompletionModelConverter() final ChatCompletionModel model; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). final List _messages; - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @override List get messages { if (_messages is EqualUnmodifiableListView) return _messages; @@ -4025,12 +4294,20 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
+ /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. final Map? _logitBias; /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias { @@ -4041,24 +4318,36 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { return EqualUnmodifiableMapView(value); } - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. @override @JsonKey(includeIfNull: false) final bool? logprobs; - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs; - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). @override @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens; - /// How many chat completion choices to generate for each input message. 
Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + @override + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + final int? maxCompletionTokens; + + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. @override @JsonKey(includeIfNull: false) final int? n; @@ -4070,29 +4359,63 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty; - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override @JsonKey(name: 'response_format', includeIfNull: false) - final ChatCompletionResponseFormat? responseFormat; + final ResponseFormat? responseFormat; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
- /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. @override @JsonKey(includeIfNull: false) final int? seed; + /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateChatCompletionRequestServiceTier? serviceTier; + /// Up to 4 sequences where the API will stop generating further tokens. @override @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) final ChatCompletionStop? stop; - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). @override @JsonKey(includeIfNull: false) final bool? stream; @@ -4116,10 +4439,14 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. final List? _tools; - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. 
+ /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. @override @JsonKey(includeIfNull: false) List? get tools { @@ -4134,7 +4461,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. @override @@ -4142,6 +4470,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice; + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). @override @JsonKey(includeIfNull: false) @@ -4152,7 +4486,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. 
@override @@ -4180,7 +4515,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, maxCompletionTokens: $maxCompletionTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, serviceTier: $serviceTier, stop: $stop, stream: $stream, streamOptions: $streamOptions, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4200,12 +4535,16 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { other.topLogprobs == topLogprobs) && (identical(other.maxTokens, maxTokens) || other.maxTokens == maxTokens) && + (identical(other.maxCompletionTokens, maxCompletionTokens) || + other.maxCompletionTokens == maxCompletionTokens) && (identical(other.n, n) || other.n == n) && (identical(other.presencePenalty, presencePenalty) || other.presencePenalty == presencePenalty) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.seed, seed) || other.seed == seed) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.stop, stop) || other.stop == stop) && (identical(other.stream, stream) || other.stream == stream) && (identical(other.streamOptions, streamOptions) || @@ -4216,6 +4555,8 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().equals(other._tools, _tools) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.user, user) || other.user == user) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && @@ -4223,7 +4564,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { .equals(other._functions, _functions)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -4234,10 +4575,12 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { logprobs, topLogprobs, maxTokens, + maxCompletionTokens, n, presencePenalty, responseFormat, seed, + serviceTier, stop, stream, streamOptions, @@ -4245,12 +4588,15 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { topP, const DeepCollectionEquality().hash(_tools), toolChoice, + parallelToolCalls, user, functionCall, const DeepCollectionEquality().hash(_functions) ]); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the 
non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> @@ -4279,12 +4625,19 @@ abstract class _CreateChatCompletionRequest @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? topLogprobs, @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens, + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + final int? maxCompletionTokens, @JsonKey(includeIfNull: false) final int? n, @JsonKey(name: 'presence_penalty', includeIfNull: false) final double? presencePenalty, @JsonKey(name: 'response_format', includeIfNull: false) - final ChatCompletionResponseFormat? responseFormat, + final ResponseFormat? responseFormat, @JsonKey(includeIfNull: false) final int? seed, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CreateChatCompletionRequestServiceTier? serviceTier, @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) final ChatCompletionStop? stop, @@ -4297,6 +4650,8 @@ abstract class _CreateChatCompletionRequest @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final ChatCompletionToolChoiceOption? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @JsonKey(includeIfNull: false) final String? user, @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) @@ -4309,148 +4664,212 @@ abstract class _CreateChatCompletionRequest factory _CreateChatCompletionRequest.fromJson(Map json) = _$CreateChatCompletionRequestImpl.fromJson; + /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. @override - - /// ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. @_ChatCompletionModelConverter() ChatCompletionModel get model; - @override - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - List get messages; + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @override + List get messages; /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. /// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty; - @override /// Modify the likelihood of specified tokens appearing in the completion. /// - /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + /// value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + /// sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + /// likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + /// relevant token. + @override @JsonKey(name: 'logit_bias', includeIfNull: false) Map? get logitBias; - @override - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + /// each output token returned in the `content` of `message`. + @override @JsonKey(includeIfNull: false) bool? get logprobs; - @override - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + /// each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + @override @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; - @override - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + /// completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + /// via API. /// - /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + /// [o1 series models](https://platform.openai.com/docs/guides/reasoning). + @override @JsonKey(name: 'max_tokens', includeIfNull: false) int? get maxTokens; + + /// An upper bound for the number of tokens that can be generated for a completion, including visible output + /// tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). @override + @JsonKey(name: 'max_completion_tokens', includeIfNull: false) + int? get maxCompletionTokens; - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// How many chat completion choices to generate for each input message. Note that you will be charged based on + /// the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
/// /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + @override @JsonKey(name: 'presence_penalty', includeIfNull: false) double? get presencePenalty; - @override - /// An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + /// than `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + /// will match your supplied JSON schema. + /// Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - @JsonKey(name: 'response_format', includeIfNull: false) - ChatCompletionResponseFormat? get responseFormat; + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + /// valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + /// or user message. Without this, the model may generate an unending stream of whitespace until the generation + /// reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + /// content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + /// `max_tokens` or the conversation exceeded the max context length. + /// Any of: [ResponseFormatText], [ResponseFormatJsonObject], [ResponseFormatJsonSchema] @override + @JsonKey(name: 'response_format', includeIfNull: false) + ResponseFormat? get responseFormat; /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests + /// with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + /// monitor changes in the backend. + @override @JsonKey(includeIfNull: false) int? get seed; + + /// Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers + /// subscribed to the scale tier service: + /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + /// until they are exhausted. + /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + /// default service tier with a lower uptime SLA and no latency guarantee. + /// - If set to 'default', the request will be processed using the default service tier with a lower uptime + /// SLA and no latency guarantee. + /// - When not set, the default behavior is 'auto'. + /// + /// When this parameter is set, the response body will include the `service_tier` utilized. @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CreateChatCompletionRequestServiceTier? get serviceTier; /// Up to 4 sequences where the API will stop generating further tokens. + @override @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) ChatCompletionStop? get stop; - @override - /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override @JsonKey(includeIfNull: false) bool? get stream; - @override /// Options for streaming response. Only set this when you set `stream: true`. + @override @JsonKey(name: 'stream_options', includeIfNull: false) ChatCompletionStreamOptions? get streamOptions; - @override /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. /// /// We generally recommend altering this or `temperature` but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. Currently, only functions are supported as a tool. + /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + /// supported. + @override @JsonKey(includeIfNull: false) List? get tools; - @override /// Controls which (if any) tool is called by the model. 
/// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools. - /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + /// model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. + @override @_ChatCompletionToolChoiceOptionConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) ChatCompletionToolChoiceOption? get toolChoice; + + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; - @override /// Deprecated in favor of `tool_choice`. /// /// Controls which (if any) function is called by the model. /// `none` means the model will not call a function and instead generates a message. /// `auto` means the model can pick between generating a message or calling a function. - /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. + /// Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + /// function. /// /// `none` is the default when no functions are present. `auto` is the default if functions are present. + @override @_ChatCompletionFunctionCallConverter() @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionFunctionCall? get functionCall; - @override /// Deprecated in favor of `tools`. /// /// A list of functions the model may generate JSON inputs for. + @override @JsonKey(includeIfNull: false) List? get functions; + + /// Create a copy of CreateChatCompletionRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4509,6 +4928,8 @@ mixin _$ChatCompletionModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -4528,6 +4949,9 @@ class _$ChatCompletionModelCopyWithImpl<$Res, $Val extends ChatCompletionModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -4550,6 +4974,8 @@ class __$$ChatCompletionModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -4595,11 +5021,13 @@ class _$ChatCompletionModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelEnumerationImplCopyWith< @@ -4687,7 +5115,10 @@ abstract class ChatCompletionModelEnumeration extends ChatCompletionModel { @override ChatCompletionModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionModelEnumerationImplCopyWith< _$ChatCompletionModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -4713,6 +5144,8 @@ class __$$ChatCompletionModelStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4756,11 +5189,13 @@ class _$ChatCompletionModelStringImpl extends ChatCompletionModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> @@ -4847,167 +5282,11 @@ abstract class ChatCompletionModelString extends ChatCompletionModel { @override String get value; - @JsonKey(ignore: true) - _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> - get copyWith => throw _privateConstructorUsedError; -} - -ChatCompletionResponseFormat _$ChatCompletionResponseFormatFromJson( - Map json) { - return _ChatCompletionResponseFormat.fromJson(json); -} -/// @nodoc -mixin _$ChatCompletionResponseFormat { - /// Must be one of `text` or `json_object`. 
- ChatCompletionResponseFormatType get type => - throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ChatCompletionResponseFormatCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ChatCompletionResponseFormatCopyWith<$Res> { - factory $ChatCompletionResponseFormatCopyWith( - ChatCompletionResponseFormat value, - $Res Function(ChatCompletionResponseFormat) then) = - _$ChatCompletionResponseFormatCopyWithImpl<$Res, - ChatCompletionResponseFormat>; - @useResult - $Res call({ChatCompletionResponseFormatType type}); -} - -/// @nodoc -class _$ChatCompletionResponseFormatCopyWithImpl<$Res, - $Val extends ChatCompletionResponseFormat> - implements $ChatCompletionResponseFormatCopyWith<$Res> { - _$ChatCompletionResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormatType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ChatCompletionResponseFormatImplCopyWith<$Res> - implements $ChatCompletionResponseFormatCopyWith<$Res> { - factory _$$ChatCompletionResponseFormatImplCopyWith( - _$ChatCompletionResponseFormatImpl value, - $Res Function(_$ChatCompletionResponseFormatImpl) then) = - __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({ChatCompletionResponseFormatType type}); -} - -/// @nodoc -class __$$ChatCompletionResponseFormatImplCopyWithImpl<$Res> - extends _$ChatCompletionResponseFormatCopyWithImpl<$Res, - _$ChatCompletionResponseFormatImpl> - implements _$$ChatCompletionResponseFormatImplCopyWith<$Res> { - __$$ChatCompletionResponseFormatImplCopyWithImpl( - _$ChatCompletionResponseFormatImpl _value, - $Res Function(_$ChatCompletionResponseFormatImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$ChatCompletionResponseFormatImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionResponseFormatType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$ChatCompletionResponseFormatImpl extends _ChatCompletionResponseFormat { - const _$ChatCompletionResponseFormatImpl( - {this.type = ChatCompletionResponseFormatType.text}) - : super._(); - - factory _$ChatCompletionResponseFormatImpl.fromJson( - Map json) => - _$$ChatCompletionResponseFormatImplFromJson(json); - - /// Must be one of `text` or `json_object`. 
- @override - @JsonKey() - final ChatCompletionResponseFormatType type; - - @override - String toString() { - return 'ChatCompletionResponseFormat(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionResponseFormatImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionResponseFormatImplCopyWith< - _$ChatCompletionResponseFormatImpl> - get copyWith => __$$ChatCompletionResponseFormatImplCopyWithImpl< - _$ChatCompletionResponseFormatImpl>(this, _$identity); - - @override - Map toJson() { - return _$$ChatCompletionResponseFormatImplToJson( - this, - ); - } -} - -abstract class _ChatCompletionResponseFormat - extends ChatCompletionResponseFormat { - const factory _ChatCompletionResponseFormat( - {final ChatCompletionResponseFormatType type}) = - _$ChatCompletionResponseFormatImpl; - const _ChatCompletionResponseFormat._() : super._(); - - factory _ChatCompletionResponseFormat.fromJson(Map json) = - _$ChatCompletionResponseFormatImpl.fromJson; - - @override - - /// Must be one of `text` or `json_object`. - ChatCompletionResponseFormatType get type; - @override - @JsonKey(ignore: true) - _$$ChatCompletionResponseFormatImplCopyWith< - _$ChatCompletionResponseFormatImpl> + /// Create a copy of ChatCompletionModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionModelStringImplCopyWith<_$ChatCompletionModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5065,6 +5344,8 @@ mixin _$ChatCompletionStop { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionStop to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5084,6 +5365,9 @@ class _$ChatCompletionStopCopyWithImpl<$Res, $Val extends ChatCompletionStop> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5106,6 +5390,8 @@ class __$$ChatCompletionStopListStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopListStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5157,12 +5443,14 @@ class _$ChatCompletionStopListStringImpl extends ChatCompletionStopListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopListStringImplCopyWith< @@ -5250,7 +5538,10 @@ abstract class ChatCompletionStopListString extends ChatCompletionStop { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStopListStringImplCopyWith< _$ChatCompletionStopListStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -5276,6 +5567,8 @@ class __$$ChatCompletionStopStringImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStopStringImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5319,11 +5612,13 @@ class _$ChatCompletionStopStringImpl extends ChatCompletionStopString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> @@ -5410,7 +5705,10 @@ abstract class ChatCompletionStopString extends ChatCompletionStop { @override String? get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStop + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStopStringImplCopyWith<_$ChatCompletionStopStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5481,6 +5779,8 @@ mixin _$ChatCompletionToolChoiceOption { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionToolChoiceOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5503,6 +5803,9 @@ class _$ChatCompletionToolChoiceOptionCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5525,6 +5828,8 @@ class __$$ChatCompletionToolChoiceOptionEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolChoiceOptionEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5571,11 +5876,13 @@ class _$ChatCompletionToolChoiceOptionEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< @@ -5675,7 +5982,10 @@ abstract class ChatCompletionToolChoiceOptionEnumeration @override ChatCompletionToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolChoiceOptionEnumerationImplCopyWith< _$ChatCompletionToolChoiceOptionEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -5712,6 +6022,8 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit _then) : super(_value, _then); + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5726,6 +6038,8 @@ class __$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWit )); } + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionNamedToolChoiceCopyWith<$Res> get value { @@ -5770,11 +6084,13 @@ class _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< @@ -5876,7 +6192,10 @@ abstract class ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice @override ChatCompletionNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionToolChoiceOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionToolChoiceOptionChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -5947,6 +6266,8 @@ mixin _$ChatCompletionFunctionCall { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -5968,6 +6289,9 @@ class _$ChatCompletionFunctionCallCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -5990,6 +6314,8 @@ class __$$ChatCompletionFunctionCallEnumerationImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6036,11 +6362,13 @@ class _$ChatCompletionFunctionCallEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallEnumerationImplCopyWith< @@ -6137,7 +6465,10 @@ abstract class ChatCompletionFunctionCallEnumeration @override ChatCompletionFunctionCallMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallEnumerationImplCopyWith< _$ChatCompletionFunctionCallEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -6174,6 +6505,8 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6188,6 +6521,8 @@ class __$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith )); } + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get value { @@ -6233,11 +6568,13 @@ class _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< @@ -6337,7 +6674,10 @@ abstract class ChatCompletionFunctionCallChatCompletionFunctionCallOption @override ChatCompletionFunctionCallOption get value; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6356,8 +6696,12 @@ mixin _$ChatCompletionMessageFunctionCall { /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. String get arguments => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageFunctionCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6384,6 +6728,8 @@ class _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -6425,6 +6771,8 @@ class __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageFunctionCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6479,11 +6827,13 @@ class _$ChatCompletionMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageFunctionCallImplCopyWith< @@ -6510,16 +6860,18 @@ abstract class _ChatCompletionMessageFunctionCall Map json) = _$ChatCompletionMessageFunctionCallImpl.fromJson; - @override - /// The name of the function to call. - String get name; @override + String get name; /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + @override String get arguments; + + /// Create a copy of ChatCompletionMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageFunctionCallImplCopyWith< _$ChatCompletionMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -6535,8 +6887,12 @@ mixin _$ChatCompletionFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6563,6 +6919,8 @@ class _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6599,6 +6957,8 @@ class __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionFunctionCallOptionImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6641,11 +7001,13 @@ class _$ChatCompletionFunctionCallOptionImpl (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionFunctionCallOptionImplCopyWith< @@ -6671,12 +7033,14 @@ abstract class _ChatCompletionFunctionCallOption Map json) = _$ChatCompletionFunctionCallOptionImpl.fromJson; - @override - /// The name of the function to call. + @override String get name; + + /// Create a copy of ChatCompletionFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionFunctionCallOptionImplCopyWith< _$ChatCompletionFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; @@ -6688,21 +7052,33 @@ FunctionObject _$FunctionObjectFromJson(Map json) { /// @nodoc mixin _$FunctionObject { - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. String get name => throw _privateConstructorUsedError; /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? get description => throw _privateConstructorUsedError; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) Map? get parameters => throw _privateConstructorUsedError; + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). + @JsonKey(includeIfNull: false) + bool? get strict => throw _privateConstructorUsedError; + + /// Serializes this FunctionObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FunctionObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6716,7 +7092,8 @@ abstract class $FunctionObjectCopyWith<$Res> { $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters}); + @JsonKey(includeIfNull: false) Map? parameters, + @JsonKey(includeIfNull: false) bool? strict}); } /// @nodoc @@ -6729,12 +7106,15 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ Object? name = null, Object? description = freezed, Object? parameters = freezed, + Object? strict = freezed, }) { return _then(_value.copyWith( name: null == name @@ -6749,6 +7129,10 @@ class _$FunctionObjectCopyWithImpl<$Res, $Val extends FunctionObject> ? _value.parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, ) as $Val); } } @@ -6764,7 +7148,8 @@ abstract class _$$FunctionObjectImplCopyWith<$Res> $Res call( {String name, @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) Map? parameters}); + @JsonKey(includeIfNull: false) Map? parameters, + @JsonKey(includeIfNull: false) bool? strict}); } /// @nodoc @@ -6775,12 +7160,15 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> _$FunctionObjectImpl _value, $Res Function(_$FunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? name = null, Object? description = freezed, Object? parameters = freezed, + Object? strict = freezed, }) { return _then(_$FunctionObjectImpl( name: null == name @@ -6795,6 +7183,10 @@ class __$$FunctionObjectImplCopyWithImpl<$Res> ? _value._parameters : parameters // ignore: cast_nullable_to_non_nullable as Map?, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, )); } } @@ -6805,14 +7197,16 @@ class _$FunctionObjectImpl extends _FunctionObject { const _$FunctionObjectImpl( {required this.name, @JsonKey(includeIfNull: false) this.description, - @JsonKey(includeIfNull: false) final Map? parameters}) + @JsonKey(includeIfNull: false) final Map? parameters, + @JsonKey(includeIfNull: false) this.strict = false}) : _parameters = parameters, super._(); factory _$FunctionObjectImpl.fromJson(Map json) => _$$FunctionObjectImplFromJson(json); - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. @override final String name; @@ -6821,12 +7215,12 @@ class _$FunctionObjectImpl extends _FunctionObject { @JsonKey(includeIfNull: false) final String? description; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. final Map? _parameters; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
+ /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @override @@ -6839,9 +7233,17 @@ class _$FunctionObjectImpl extends _FunctionObject { return EqualUnmodifiableMapView(value); } + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). + @override + @JsonKey(includeIfNull: false) + final bool? strict; + @override String toString() { - return 'FunctionObject(name: $name, description: $description, parameters: $parameters)'; + return 'FunctionObject(name: $name, description: $description, parameters: $parameters, strict: $strict)'; } @override @@ -6853,15 +7255,18 @@ class _$FunctionObjectImpl extends _FunctionObject { (identical(other.description, description) || other.description == description) && const DeepCollectionEquality() - .equals(other._parameters, _parameters)); + .equals(other._parameters, _parameters) && + (identical(other.strict, strict) || other.strict == strict)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, description, - const DeepCollectionEquality().hash(_parameters)); + const DeepCollectionEquality().hash(_parameters), strict); - @JsonKey(ignore: true) + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => @@ -6878,37 +7283,318 @@ class _$FunctionObjectImpl extends _FunctionObject { abstract class _FunctionObject extends FunctionObject { const factory _FunctionObject( - {required final String name, - @JsonKey(includeIfNull: false) final String? description, - @JsonKey(includeIfNull: false) - final Map? parameters}) = _$FunctionObjectImpl; + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(includeIfNull: false) final Map? parameters, + @JsonKey(includeIfNull: false) final bool? strict}) = + _$FunctionObjectImpl; const _FunctionObject._() : super._(); factory _FunctionObject.fromJson(Map json) = _$FunctionObjectImpl.fromJson; + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + /// maximum length of 64. @override - - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. String get name; - @override /// A description of what the function does, used by the model to choose when and how to call the function. + @override @JsonKey(includeIfNull: false) String? get description; - @override - /// The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. + @override @JsonKey(includeIfNull: false) Map? get parameters; + + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will + /// follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. Learn more about Structured Outputs in the + /// [function calling guide](](https://platform.openai.com/docs/guides/function-calling). @override - @JsonKey(ignore: true) + @JsonKey(includeIfNull: false) + bool? get strict; + + /// Create a copy of FunctionObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) _$$FunctionObjectImplCopyWith<_$FunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; } +JsonSchemaObject _$JsonSchemaObjectFromJson(Map json) { + return _JsonSchemaObject.fromJson(json); +} + +/// @nodoc +mixin _$JsonSchemaObject { + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + String get name => throw _privateConstructorUsedError; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; + + /// The schema for the response format, described as a JSON Schema object. + Map get schema => throw _privateConstructorUsedError; + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @JsonKey(includeIfNull: false) + bool? get strict => throw _privateConstructorUsedError; + + /// Serializes this JsonSchemaObject to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $JsonSchemaObjectCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $JsonSchemaObjectCopyWith<$Res> { + factory $JsonSchemaObjectCopyWith( + JsonSchemaObject value, $Res Function(JsonSchemaObject) then) = + _$JsonSchemaObjectCopyWithImpl<$Res, JsonSchemaObject>; + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + Map schema, + @JsonKey(includeIfNull: false) bool? 
strict}); +} + +/// @nodoc +class _$JsonSchemaObjectCopyWithImpl<$Res, $Val extends JsonSchemaObject> + implements $JsonSchemaObjectCopyWith<$Res> { + _$JsonSchemaObjectCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? schema = null, + Object? strict = freezed, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + schema: null == schema + ? _value.schema + : schema // ignore: cast_nullable_to_non_nullable + as Map, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$JsonSchemaObjectImplCopyWith<$Res> + implements $JsonSchemaObjectCopyWith<$Res> { + factory _$$JsonSchemaObjectImplCopyWith(_$JsonSchemaObjectImpl value, + $Res Function(_$JsonSchemaObjectImpl) then) = + __$$JsonSchemaObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String name, + @JsonKey(includeIfNull: false) String? description, + Map schema, + @JsonKey(includeIfNull: false) bool? strict}); +} + +/// @nodoc +class __$$JsonSchemaObjectImplCopyWithImpl<$Res> + extends _$JsonSchemaObjectCopyWithImpl<$Res, _$JsonSchemaObjectImpl> + implements _$$JsonSchemaObjectImplCopyWith<$Res> { + __$$JsonSchemaObjectImplCopyWithImpl(_$JsonSchemaObjectImpl _value, + $Res Function(_$JsonSchemaObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? name = null, + Object? description = freezed, + Object? schema = null, + Object? strict = freezed, + }) { + return _then(_$JsonSchemaObjectImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + schema: null == schema + ? _value._schema + : schema // ignore: cast_nullable_to_non_nullable + as Map, + strict: freezed == strict + ? _value.strict + : strict // ignore: cast_nullable_to_non_nullable + as bool?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$JsonSchemaObjectImpl extends _JsonSchemaObject { + const _$JsonSchemaObjectImpl( + {required this.name, + @JsonKey(includeIfNull: false) this.description, + required final Map schema, + @JsonKey(includeIfNull: false) this.strict = false}) + : _schema = schema, + super._(); + + factory _$JsonSchemaObjectImpl.fromJson(Map json) => + _$$JsonSchemaObjectImplFromJson(json); + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + @override + final String name; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @override + @JsonKey(includeIfNull: false) + final String? description; + + /// The schema for the response format, described as a JSON Schema object. 
+ final Map _schema; + + /// The schema for the response format, described as a JSON Schema object. + @override + Map get schema { + if (_schema is EqualUnmodifiableMapView) return _schema; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(_schema); + } + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @override + @JsonKey(includeIfNull: false) + final bool? strict; + + @override + String toString() { + return 'JsonSchemaObject(name: $name, description: $description, schema: $schema, strict: $strict)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$JsonSchemaObjectImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + const DeepCollectionEquality().equals(other._schema, _schema) && + (identical(other.strict, strict) || other.strict == strict)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, name, description, + const DeepCollectionEquality().hash(_schema), strict); + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => + __$$JsonSchemaObjectImplCopyWithImpl<_$JsonSchemaObjectImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$JsonSchemaObjectImplToJson( + this, + ); + } +} + +abstract class _JsonSchemaObject extends JsonSchemaObject { + const factory _JsonSchemaObject( + {required final String name, + @JsonKey(includeIfNull: false) final String? description, + required final Map schema, + @JsonKey(includeIfNull: false) final bool? strict}) = + _$JsonSchemaObjectImpl; + const _JsonSchemaObject._() : super._(); + + factory _JsonSchemaObject.fromJson(Map json) = + _$JsonSchemaObjectImpl.fromJson; + + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + /// length of 64. + @override + String get name; + + /// A description of what the response format is for, used by the model to determine how to respond in the + /// format. + @override + @JsonKey(includeIfNull: false) + String? get description; + + /// The schema for the response format, described as a JSON Schema object. + @override + Map get schema; + + /// Whether to enable strict schema adherence when generating the output. If set to true, the model will always + /// follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + @override + @JsonKey(includeIfNull: false) + bool? get strict; + + /// Create a copy of JsonSchemaObject + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$JsonSchemaObjectImplCopyWith<_$JsonSchemaObjectImpl> get copyWith => + throw _privateConstructorUsedError; +} + ChatCompletionTool _$ChatCompletionToolFromJson(Map json) { return _ChatCompletionTool.fromJson(json); } @@ -6921,8 +7607,12 @@ mixin _$ChatCompletionTool { /// A function that the model may call. FunctionObject get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTool to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionToolCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6948,6 +7638,8 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6966,6 +7658,8 @@ class _$ChatCompletionToolCopyWithImpl<$Res, $Val extends ChatCompletionTool> ) as $Val); } + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FunctionObjectCopyWith<$Res> get function { @@ -6997,6 +7691,8 @@ class __$$ChatCompletionToolImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionToolImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7048,11 +7744,13 @@ class _$ChatCompletionToolImpl extends _ChatCompletionTool { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => @@ -7076,16 +7774,18 @@ abstract class _ChatCompletionTool extends ChatCompletionTool { factory _ChatCompletionTool.fromJson(Map json) = _$ChatCompletionToolImpl.fromJson; - @override - /// The type of the tool. Currently, only `function` is supported. - ChatCompletionToolType get type; @override + ChatCompletionToolType get type; /// A function that the model may call. + @override FunctionObject get function; + + /// Create a copy of ChatCompletionTool + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionToolImplCopyWith<_$ChatCompletionToolImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7105,8 +7805,12 @@ mixin _$ChatCompletionNamedToolChoice { ChatCompletionFunctionCallOption get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. 
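Editor's note: `JsonSchemaObject` is the new payload used for the `json_schema` response format (Structured Outputs), carrying a name, an optional description, the JSON Schema itself, and the same `strict` switch. A hedged construction sketch follows; the field names in the schema are illustrative only, and how the object is attached to the request's response format is not shown here to avoid guessing at that API.

// Reuses the import from the sketch above.
const weatherReportFormat = JsonSchemaObject(
  name: 'weather_report',
  description: 'A structured weather report.',
  schema: {
    'type': 'object',
    'properties': {
      'city': {'type': 'string'},
      'temperature_celsius': {'type': 'number'},
    },
    'required': ['city', 'temperature_celsius'],
    'additionalProperties': false,
  },
  strict: true, // the model must always follow the schema exactly
);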
+ @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7137,6 +7841,8 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7155,6 +7861,8 @@ class _$ChatCompletionNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionFunctionCallOptionCopyWith<$Res> get function { @@ -7192,6 +7900,8 @@ class __$$ChatCompletionNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7246,11 +7956,13 @@ class _$ChatCompletionNamedToolChoiceImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionNamedToolChoiceImplCopyWith< @@ -7277,16 +7989,18 @@ abstract class _ChatCompletionNamedToolChoice factory _ChatCompletionNamedToolChoice.fromJson(Map json) = _$ChatCompletionNamedToolChoiceImpl.fromJson; - @override - /// The type of the tool. Currently, only `function` is supported. - ChatCompletionNamedToolChoiceType get type; @override + ChatCompletionNamedToolChoiceType get type; /// Forces the model to call the specified function. + @override ChatCompletionFunctionCallOption get function; + + /// Create a copy of ChatCompletionNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionNamedToolChoiceImplCopyWith< _$ChatCompletionNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -7310,8 +8024,12 @@ mixin _$ChatCompletionMessageToolCall { ChatCompletionMessageFunctionCall get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageToolCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionMessageToolCallCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7343,6 +8061,8 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7366,6 +8086,8 @@ class _$ChatCompletionMessageToolCallCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionMessageFunctionCallCopyWith<$Res> get function { @@ -7404,6 +8126,8 @@ class __$$ChatCompletionMessageToolCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionMessageToolCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7468,11 +8192,13 @@ class _$ChatCompletionMessageToolCallImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionMessageToolCallImplCopyWith< @@ -7500,20 +8226,22 @@ abstract class _ChatCompletionMessageToolCall factory _ChatCompletionMessageToolCall.fromJson(Map json) = _$ChatCompletionMessageToolCallImpl.fromJson; - @override - /// The ID of the tool call. - String get id; @override + String get id; /// The type of the tool. Currently, only `function` is supported. - ChatCompletionMessageToolCallType get type; @override + ChatCompletionMessageToolCallType get type; /// The name and arguments of a function that should be called, as generated by the model. + @override ChatCompletionMessageFunctionCall get function; + + /// Create a copy of ChatCompletionMessageToolCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionMessageToolCallImplCopyWith< _$ChatCompletionMessageToolCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -7530,8 +8258,12 @@ mixin _$ChatCompletionStreamOptions { @JsonKey(name: 'include_usage', includeIfNull: false) bool? get includeUsage => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamOptions to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamOptionsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7560,6 +8292,8 @@ class _$ChatCompletionStreamOptionsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7598,6 +8332,8 @@ class __$$ChatCompletionStreamOptionsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamOptionsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7642,11 +8378,13 @@ class _$ChatCompletionStreamOptionsImpl extends _ChatCompletionStreamOptions { other.includeUsage == includeUsage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, includeUsage); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> @@ -7671,13 +8409,15 @@ abstract class _ChatCompletionStreamOptions factory _ChatCompletionStreamOptions.fromJson(Map json) = _$ChatCompletionStreamOptionsImpl.fromJson; - @override - /// If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + @override @JsonKey(name: 'include_usage', includeIfNull: false) bool? get includeUsage; + + /// Create a copy of ChatCompletionStreamOptions + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamOptionsImplCopyWith<_$ChatCompletionStreamOptionsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -7703,6 +8443,14 @@ mixin _$CreateChatCompletionResponse { /// The model used for the chat completion. String get model => throw _privateConstructorUsedError; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier => throw _privateConstructorUsedError; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. @@ -7716,8 +8464,12 @@ mixin _$CreateChatCompletionResponse { @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -7735,6 +8487,11 @@ abstract class $CreateChatCompletionResponseCopyWith<$Res> { List choices, int created, String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -7754,6 +8511,8 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -7761,6 +8520,7 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, Object? choices = null, Object? created = null, Object? model = null, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -7782,6 +8542,10 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -7797,6 +8561,8 @@ class _$CreateChatCompletionResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -7824,6 +8590,11 @@ abstract class _$$CreateChatCompletionResponseImplCopyWith<$Res> List choices, int created, String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, String object, @@ -7843,6 +8614,8 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7850,6 +8623,7 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> Object? choices = null, Object? created = null, Object? model = null, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, Object? object = null, Object? usage = freezed, @@ -7871,6 +8645,10 @@ class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res> ? _value.model : model // ignore: cast_nullable_to_non_nullable as String, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable @@ -7895,6 +8673,11 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { required final List choices, required this.created, required this.model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, required this.object, @@ -7930,6 +8713,15 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override final String model; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
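Editor's note: `serviceTier` is only populated when the request specified a `service_tier`, so it should be treated as optional when inspecting a `CreateChatCompletionResponse`. A small illustrative helper, assuming the same `package:openai_dart/openai_dart.dart` import as above:

// Describes which service tier handled a completion, if it was reported.
String describeTier(CreateChatCompletionResponse res) =>
    res.serviceTier == null
        ? '${res.model}: service tier not reported'
        : '${res.model}: served on ${res.serviceTier}';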
@@ -7948,7 +8740,7 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { @override String toString() { - return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -7960,13 +8752,15 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -7974,11 +8768,14 @@ class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse { const DeepCollectionEquality().hash(_choices), created, model, + serviceTier, systemFingerprint, object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionResponseImplCopyWith< @@ -8001,6 +8798,11 @@ abstract class _CreateChatCompletionResponse required final List choices, required final int created, required final String model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, required final String object, @@ -8011,41 +8813,52 @@ abstract class _CreateChatCompletionResponse factory _CreateChatCompletionResponse.fromJson(Map json) = _$CreateChatCompletionResponseImpl.fromJson; - @override - /// A unique identifier for the chat completion. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// A list of chat completion choices. Can be more than one if `n` is greater than 1. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the chat completion was created. - int get created; @override + int get created; /// The model used for the chat completion. + @override String get model; + + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? 
get systemFingerprint; - @override /// The object type, which is always `chat.completion`. - String get object; @override + String get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateChatCompletionResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionResponseImplCopyWith< _$CreateChatCompletionResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -8079,8 +8892,12 @@ mixin _$ChatCompletionResponseChoice { /// Log probability information for the choice. ChatCompletionLogprobs? get logprobs => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionResponseChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8116,6 +8933,8 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8144,6 +8963,8 @@ class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionLogprobsCopyWith<$Res>? get logprobs { @@ -8189,6 +9010,8 @@ class __$$ChatCompletionResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionResponseChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8276,12 +9099,14 @@ class _$ChatCompletionResponseChoiceImpl extends _ChatCompletionResponseChoice { other.logprobs == logprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, finishReason, index, const DeepCollectionEquality().hash(message), logprobs); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionResponseChoiceImplCopyWith< @@ -8313,31 +9138,33 @@ abstract class _ChatCompletionResponseChoice factory _ChatCompletionResponseChoice.fromJson(Map json) = _$ChatCompletionResponseChoiceImpl.fromJson; - @override - /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? 
get finishReason; - @override /// The index of the choice in the list of choices. + @override @JsonKey(includeIfNull: false) int? get index; - @override /// An assistant message in a chat conversation. - ChatCompletionAssistantMessage get message; @override + ChatCompletionAssistantMessage get message; /// Log probability information for the choice. + @override ChatCompletionLogprobs? get logprobs; + + /// Create a copy of ChatCompletionResponseChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionResponseChoiceImplCopyWith< _$ChatCompletionResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -8351,11 +9178,21 @@ ChatCompletionLogprobs _$ChatCompletionLogprobsFromJson( /// @nodoc mixin _$ChatCompletionLogprobs { /// A list of message content tokens with log probability information. + @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) + List? get refusal => + throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionLogprobsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8366,7 +9203,10 @@ abstract class $ChatCompletionLogprobsCopyWith<$Res> { $Res Function(ChatCompletionLogprobs) then) = _$ChatCompletionLogprobsCopyWithImpl<$Res, ChatCompletionLogprobs>; @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -8380,16 +9220,23 @@ class _$ChatCompletionLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -8403,7 +9250,10 @@ abstract class _$$ChatCompletionLogprobsImplCopyWith<$Res> __$$ChatCompletionLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -8416,16 +9266,23 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_$ChatCompletionLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? 
_value._refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -8434,8 +9291,12 @@ class __$$ChatCompletionLogprobsImplCopyWithImpl<$Res> @JsonSerializable() class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { const _$ChatCompletionLogprobsImpl( - {required final List? content}) + {@JsonKey(includeIfNull: false) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) : _content = content, + _refusal = refusal, super._(); factory _$ChatCompletionLogprobsImpl.fromJson(Map json) => @@ -8446,6 +9307,7 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -8454,9 +9316,23 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return EqualUnmodifiableListView(value); } + /// A list of message refusal tokens with log probability information. + final List? _refusal; + + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal { + final value = _refusal; + if (value == null) return null; + if (_refusal is EqualUnmodifiableListView) return _refusal; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'ChatCompletionLogprobs(content: $content)'; + return 'ChatCompletionLogprobs(content: $content, refusal: $refusal)'; } @override @@ -8464,15 +9340,20 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content)); + const DeepCollectionEquality().equals(other._content, _content) && + const DeepCollectionEquality().equals(other._refusal, _refusal)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_content), + const DeepCollectionEquality().hash(_refusal)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> @@ -8489,19 +9370,30 @@ class _$ChatCompletionLogprobsImpl extends _ChatCompletionLogprobs { abstract class _ChatCompletionLogprobs extends ChatCompletionLogprobs { const factory _ChatCompletionLogprobs( - {required final List? content}) = + {@JsonKey(includeIfNull: false) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) = _$ChatCompletionLogprobsImpl; const _ChatCompletionLogprobs._() : super._(); factory _ChatCompletionLogprobs.fromJson(Map json) = _$ChatCompletionLogprobsImpl.fromJson; - @override - /// A list of message content tokens with log probability information. + @override + @JsonKey(includeIfNull: false) List? get content; + + /// A list of message refusal tokens with log probability information. @override - @JsonKey(ignore: true) + @JsonKey(includeIfNull: false) + List? 
get refusal; + + /// Create a copy of ChatCompletionLogprobs + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionLogprobsImplCopyWith<_$ChatCompletionLogprobsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8527,8 +9419,12 @@ mixin _$ChatCompletionTokenLogprob { List get topLogprobs => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTokenLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionTokenLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8559,6 +9455,8 @@ class _$ChatCompletionTokenLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8615,6 +9513,8 @@ class __$$ChatCompletionTokenLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenLogprobImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8711,7 +9611,7 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { .equals(other._topLogprobs, _topLogprobs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -8720,7 +9620,9 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { const DeepCollectionEquality().hash(_bytes), const DeepCollectionEquality().hash(_topLogprobs)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> @@ -8748,25 +9650,27 @@ abstract class _ChatCompletionTokenLogprob extends ChatCompletionTokenLogprob { factory _ChatCompletionTokenLogprob.fromJson(Map json) = _$ChatCompletionTokenLogprobImpl.fromJson; - @override - /// The token. - String get token; @override + String get token; /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - double get logprob; @override + double get logprob; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - List? get bytes; @override + List? get bytes; /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + @override @JsonKey(name: 'top_logprobs') List get topLogprobs; + + /// Create a copy of ChatCompletionTokenLogprob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionTokenLogprobImplCopyWith<_$ChatCompletionTokenLogprobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -8787,8 +9691,12 @@ mixin _$ChatCompletionTokenTopLogprob { /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. List? get bytes => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionTokenTopLogprob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionTokenTopLogprobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -8815,6 +9723,8 @@ class _$ChatCompletionTokenTopLogprobCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8861,6 +9771,8 @@ class __$$ChatCompletionTokenTopLogprobImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionTokenTopLogprobImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -8936,12 +9848,14 @@ class _$ChatCompletionTokenTopLogprobImpl const DeepCollectionEquality().equals(other._bytes, _bytes)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, token, logprob, const DeepCollectionEquality().hash(_bytes)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionTokenTopLogprobImplCopyWith< @@ -8968,20 +9882,22 @@ abstract class _ChatCompletionTokenTopLogprob factory _ChatCompletionTokenTopLogprob.fromJson(Map json) = _$ChatCompletionTokenTopLogprobImpl.fromJson; - @override - /// The token. - String get token; @override + String get token; /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - double get logprob; @override + double get logprob; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + @override List? get bytes; + + /// Create a copy of ChatCompletionTokenTopLogprob + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionTokenTopLogprobImplCopyWith< _$ChatCompletionTokenTopLogprobImpl> get copyWith => throw _privateConstructorUsedError; @@ -9004,12 +9920,21 @@ mixin _$CreateChatCompletionStreamResponse { throw _privateConstructorUsedError; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + int? get created => throw _privateConstructorUsedError; /// The model to generate the completion. @JsonKey(includeIfNull: false) String? get model => throw _privateConstructorUsedError; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier => throw _privateConstructorUsedError; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9017,14 +9942,19 @@ mixin _$CreateChatCompletionStreamResponse { String? get systemFingerprint => throw _privateConstructorUsedError; /// The object type, which is always `chat.completion.chunk`. - String get object => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get object => throw _privateConstructorUsedError; /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateChatCompletionStreamResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateChatCompletionStreamResponseCopyWith< CreateChatCompletionStreamResponse> get copyWith => throw _privateConstructorUsedError; @@ -9041,11 +9971,16 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); $CompletionUsageCopyWith<$Res>? get usage; @@ -9062,15 +9997,18 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? 
usage = freezed, }) { return _then(_value.copyWith( @@ -9082,22 +10020,26 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9105,6 +10047,8 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CompletionUsageCopyWith<$Res>? get usage { @@ -9130,11 +10074,16 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); @override @@ -9151,15 +10100,18 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res Function(_$CreateChatCompletionStreamResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, + Object? serviceTier = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_$CreateChatCompletionStreamResponseImpl( @@ -9171,22 +10123,26 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value._choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable as String?, + serviceTier: freezed == serviceTier + ? _value.serviceTier + : serviceTier // ignore: cast_nullable_to_non_nullable + as ServiceTier?, systemFingerprint: freezed == systemFingerprint ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? 
_value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9202,11 +10158,16 @@ class _$CreateChatCompletionStreamResponseImpl const _$CreateChatCompletionStreamResponseImpl( {@JsonKey(includeIfNull: false) this.id, required final List choices, - required this.created, + @JsonKey(includeIfNull: false) this.created, @JsonKey(includeIfNull: false) this.model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, - required this.object, + @JsonKey(includeIfNull: false) this.object, @JsonKey(includeIfNull: false) this.usage}) : _choices = choices, super._(); @@ -9235,13 +10196,23 @@ class _$CreateChatCompletionStreamResponseImpl /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. @override - final int created; + @JsonKey(includeIfNull: false) + final int? created; /// The model to generate the completion. @override @JsonKey(includeIfNull: false) final String? model; + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. + @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier; + /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact @@ -9251,7 +10222,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The object type, which is always `chat.completion.chunk`. @override - final String object; + @JsonKey(includeIfNull: false) + final String? object; /// Usage statistics for the completion request. @override @@ -9260,7 +10232,7 @@ class _$CreateChatCompletionStreamResponseImpl @override String toString() { - return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; + return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, serviceTier: $serviceTier, systemFingerprint: $systemFingerprint, object: $object, usage: $usage)'; } @override @@ -9272,13 +10244,15 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().equals(other._choices, _choices) && (identical(other.created, created) || other.created == created) && (identical(other.model, model) || other.model == model) && + (identical(other.serviceTier, serviceTier) || + other.serviceTier == serviceTier) && (identical(other.systemFingerprint, systemFingerprint) || other.systemFingerprint == systemFingerprint) && (identical(other.object, object) || other.object == object) && (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -9286,11 +10260,14 @@ class _$CreateChatCompletionStreamResponseImpl const DeepCollectionEquality().hash(_choices), created, model, + serviceTier, systemFingerprint, object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. 
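// NOTE (editor sketch, not part of the generated output): the recurring
// `@JsonKey(ignore: true)` -> `@JsonKey(includeFromJson: false, includeToJson: false)`
// rewrite throughout these generated files follows json_annotation deprecating
// `JsonKey.ignore` in favor of the two explicit flags. In a hand-written model the
// equivalent annotation looks like this (class and field names are invented):

import 'package:json_annotation/json_annotation.dart';

part 'example.g.dart';

@JsonSerializable()
class Example {
  Example(this.name);

  final String name;

  // Excluded from both fromJson and toJson, replacing the deprecated `ignore: true`.
  @JsonKey(includeFromJson: false, includeToJson: false)
  Object? cache;

  factory Example.fromJson(Map<String, dynamic> json) => _$ExampleFromJson(json);

  Map<String, dynamic> toJson() => _$ExampleToJson(this);
}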
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateChatCompletionStreamResponseImplCopyWith< @@ -9311,11 +10288,16 @@ abstract class _CreateChatCompletionStreamResponse const factory _CreateChatCompletionStreamResponse( {@JsonKey(includeIfNull: false) final String? id, required final List choices, - required final int created, + @JsonKey(includeIfNull: false) final int? created, @JsonKey(includeIfNull: false) final String? model, + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ServiceTier? serviceTier, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, - required final String object, + @JsonKey(includeIfNull: false) final String? object, @JsonKey(includeIfNull: false) final CompletionUsage? usage}) = _$CreateChatCompletionStreamResponseImpl; const _CreateChatCompletionStreamResponse._() : super._(); @@ -9324,43 +10306,56 @@ abstract class _CreateChatCompletionStreamResponse Map json) = _$CreateChatCompletionStreamResponseImpl.fromJson; - @override - /// A unique identifier for the chat completion. Each chunk has the same ID. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the /// last chunk if you set `stream_options: {"include_usage": true}`. - List get choices; @override + List get choices; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created; @override + @JsonKey(includeIfNull: false) + int? get created; /// The model to generate the completion. + @override @JsonKey(includeIfNull: false) String? get model; + + /// The service tier used for processing the request. This field is only included if the `service_tier` parameter + /// is specified in the request. @override + @JsonKey( + name: 'service_tier', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ServiceTier? get serviceTier; /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact + @override @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? get systemFingerprint; - @override /// The object type, which is always `chat.completion.chunk`. - String get object; @override + @JsonKey(includeIfNull: false) + String? get object; /// Usage statistics for the completion request. + @override @JsonKey(includeIfNull: false) CompletionUsage? get usage; + + /// Create a copy of CreateChatCompletionStreamResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateChatCompletionStreamResponseImplCopyWith< _$CreateChatCompletionStreamResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -9396,8 +10391,12 @@ mixin _$ChatCompletionStreamResponseChoice { @JsonKey(includeIfNull: false) int? get index => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. 
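// Editor sketch (hedged): with the change above, a stream chunk no longer has to
// carry `created` or `object`, and the new `service_tier` response field is exposed
// as `serviceTier`. Only fields and the `fromJson` factory visible in this diff are
// used below; the sample payload values are invented.

import 'package:openai_dart/openai_dart.dart';

void main() {
  final chunk = CreateChatCompletionStreamResponse.fromJson({
    'id': 'chatcmpl-123',
    'model': 'gpt-4o',
    'choices': [],
    // No `created`, `object` or `service_tier` keys: all three are optional now.
  });

  print(chunk.created ?? 'chunk carried no timestamp');
  print(chunk.object ?? 'chunk carried no object type');
  print(chunk.serviceTier); // null unless the request specified `service_tier`
}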
+ @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseChoiceCopyWith< ChatCompletionStreamResponseChoice> get copyWith => throw _privateConstructorUsedError; @@ -9436,6 +10435,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9464,6 +10465,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta { @@ -9473,6 +10476,8 @@ class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, }); } + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res>? get logprobs { @@ -9522,6 +10527,8 @@ class __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -9611,12 +10618,14 @@ class _$ChatCompletionStreamResponseChoiceImpl (identical(other.index, index) || other.index == index)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, delta, logprobs, finishReason, index); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceImplCopyWith< @@ -9650,32 +10659,34 @@ abstract class _ChatCompletionStreamResponseChoice Map json) = _$ChatCompletionStreamResponseChoiceImpl.fromJson; - @override - /// A chat completion delta generated by streamed model responses. - ChatCompletionStreamResponseDelta get delta; @override + ChatCompletionStreamResponseDelta get delta; /// Log probability information for the choice. + @override @JsonKey(includeIfNull: false) ChatCompletionStreamResponseChoiceLogprobs? get logprobs; - @override /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, /// `length` if the maximum number of tokens specified in the request was reached, /// `content_filter` if content was omitted due to a flag from our content filters, /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + @override @JsonKey( name: 'finish_reason', unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionFinishReason? get finishReason; - @override /// The index of the choice in the list of choices. + @override @JsonKey(includeIfNull: false) int? get index; + + /// Create a copy of ChatCompletionStreamResponseChoice + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseChoiceImplCopyWith< _$ChatCompletionStreamResponseChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -9690,11 +10701,21 @@ ChatCompletionStreamResponseChoiceLogprobs /// @nodoc mixin _$ChatCompletionStreamResponseChoiceLogprobs { /// A list of message content tokens with log probability information. + @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; + /// A list of message refusal tokens with log probability information. + @JsonKey(includeIfNull: false) + List? get refusal => + throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionStreamResponseChoiceLogprobs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseChoiceLogprobsCopyWith< ChatCompletionStreamResponseChoiceLogprobs> get copyWith => throw _privateConstructorUsedError; @@ -9708,7 +10729,10 @@ abstract class $ChatCompletionStreamResponseChoiceLogprobsCopyWith<$Res> { _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, ChatCompletionStreamResponseChoiceLogprobs>; @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -9723,16 +10747,23 @@ class _$ChatCompletionStreamResponseChoiceLogprobsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_value.copyWith( content: freezed == content ? _value.content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } } @@ -9747,7 +10778,10 @@ abstract class _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith<$Res> __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res>; @override @useResult - $Res call({List? content}); + $Res call( + {@JsonKey(includeIfNull: false) List? content, + @JsonKey(includeIfNull: false) + List? refusal}); } /// @nodoc @@ -9760,16 +10794,23 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseChoiceLogprobsImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, + Object? refusal = freezed, }) { return _then(_$ChatCompletionStreamResponseChoiceLogprobsImpl( content: freezed == content ? _value._content : content // ignore: cast_nullable_to_non_nullable as List?, + refusal: freezed == refusal + ? 
_value._refusal + : refusal // ignore: cast_nullable_to_non_nullable + as List?, )); } } @@ -9779,8 +10820,12 @@ class __$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWithImpl<$Res> class _$ChatCompletionStreamResponseChoiceLogprobsImpl extends _ChatCompletionStreamResponseChoiceLogprobs { const _$ChatCompletionStreamResponseChoiceLogprobsImpl( - {required final List? content}) + {@JsonKey(includeIfNull: false) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) : _content = content, + _refusal = refusal, super._(); factory _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson( @@ -9792,6 +10837,7 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl /// A list of message content tokens with log probability information. @override + @JsonKey(includeIfNull: false) List? get content { final value = _content; if (value == null) return null; @@ -9800,9 +10846,23 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return EqualUnmodifiableListView(value); } + /// A list of message refusal tokens with log probability information. + final List? _refusal; + + /// A list of message refusal tokens with log probability information. + @override + @JsonKey(includeIfNull: false) + List? get refusal { + final value = _refusal; + if (value == null) return null; + if (_refusal is EqualUnmodifiableListView) return _refusal; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + @override String toString() { - return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content)'; + return 'ChatCompletionStreamResponseChoiceLogprobs(content: $content, refusal: $refusal)'; } @override @@ -9810,15 +10870,20 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseChoiceLogprobsImpl && - const DeepCollectionEquality().equals(other._content, _content)); + const DeepCollectionEquality().equals(other._content, _content) && + const DeepCollectionEquality().equals(other._refusal, _refusal)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_content)); + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_content), + const DeepCollectionEquality().hash(_refusal)); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< @@ -9839,7 +10904,10 @@ class _$ChatCompletionStreamResponseChoiceLogprobsImpl abstract class _ChatCompletionStreamResponseChoiceLogprobs extends ChatCompletionStreamResponseChoiceLogprobs { const factory _ChatCompletionStreamResponseChoiceLogprobs( - {required final List? content}) = + {@JsonKey(includeIfNull: false) + final List? content, + @JsonKey(includeIfNull: false) + final List? refusal}) = _$ChatCompletionStreamResponseChoiceLogprobsImpl; const _ChatCompletionStreamResponseChoiceLogprobs._() : super._(); @@ -9847,12 +10915,20 @@ abstract class _ChatCompletionStreamResponseChoiceLogprobs Map json) = _$ChatCompletionStreamResponseChoiceLogprobsImpl.fromJson; - @override - /// A list of message content tokens with log probability information. + @override + @JsonKey(includeIfNull: false) List? 
get content; + + /// A list of message refusal tokens with log probability information. @override - @JsonKey(ignore: true) + @JsonKey(includeIfNull: false) + List? get refusal; + + /// Create a copy of ChatCompletionStreamResponseChoiceLogprobs + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseChoiceLogprobsImplCopyWith< _$ChatCompletionStreamResponseChoiceLogprobsImpl> get copyWith => throw _privateConstructorUsedError; @@ -9869,6 +10945,10 @@ mixin _$ChatCompletionStreamResponseDelta { @JsonKey(includeIfNull: false) String? get content => throw _privateConstructorUsedError; + /// The refusal message generated by the model. + @JsonKey(includeIfNull: false) + String? get refusal => throw _privateConstructorUsedError; + /// The name and arguments of a function that should be called, as generated by the model. @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get functionCall => @@ -9884,8 +10964,12 @@ mixin _$ChatCompletionStreamResponseDelta { includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamResponseDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamResponseDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -9900,6 +10984,7 @@ abstract class $ChatCompletionStreamResponseDeltaCopyWith<$Res> { @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -9923,10 +11008,13 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, + Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? role = freezed, @@ -9936,6 +11024,10 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -9951,6 +11043,8 @@ class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get functionCall { @@ -9976,6 +11070,7 @@ abstract class _$$ChatCompletionStreamResponseDeltaImplCopyWith<$Res> @useResult $Res call( {@JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? 
refusal, @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -9999,10 +11094,13 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamResponseDeltaImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? content = freezed, + Object? refusal = freezed, Object? functionCall = freezed, Object? toolCalls = freezed, Object? role = freezed, @@ -10012,6 +11110,10 @@ class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> ? _value.content : content // ignore: cast_nullable_to_non_nullable as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, functionCall: freezed == functionCall ? _value.functionCall : functionCall // ignore: cast_nullable_to_non_nullable @@ -10034,6 +11136,7 @@ class _$ChatCompletionStreamResponseDeltaImpl extends _ChatCompletionStreamResponseDelta { const _$ChatCompletionStreamResponseDeltaImpl( {@JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.refusal, @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) final List? toolCalls, @@ -10053,6 +11156,11 @@ class _$ChatCompletionStreamResponseDeltaImpl @JsonKey(includeIfNull: false) final String? content; + /// The refusal message generated by the model. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + /// The name and arguments of a function that should be called, as generated by the model. @override @JsonKey(name: 'function_call', includeIfNull: false) @@ -10080,7 +11188,7 @@ class _$ChatCompletionStreamResponseDeltaImpl @override String toString() { - return 'ChatCompletionStreamResponseDelta(content: $content, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; + return 'ChatCompletionStreamResponseDelta(content: $content, refusal: $refusal, functionCall: $functionCall, toolCalls: $toolCalls, role: $role)'; } @override @@ -10089,6 +11197,7 @@ class _$ChatCompletionStreamResponseDeltaImpl (other.runtimeType == runtimeType && other is _$ChatCompletionStreamResponseDeltaImpl && (identical(other.content, content) || other.content == content) && + (identical(other.refusal, refusal) || other.refusal == refusal) && (identical(other.functionCall, functionCall) || other.functionCall == functionCall) && const DeepCollectionEquality() @@ -10096,12 +11205,14 @@ class _$ChatCompletionStreamResponseDeltaImpl (identical(other.role, role) || other.role == role)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, content, functionCall, + int get hashCode => Object.hash(runtimeType, content, refusal, functionCall, const DeepCollectionEquality().hash(_toolCalls), role); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamResponseDeltaImplCopyWith< @@ -10121,6 +11232,7 @@ abstract class _ChatCompletionStreamResponseDelta extends ChatCompletionStreamResponseDelta { const factory _ChatCompletionStreamResponseDelta( {@JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? refusal, @JsonKey(name: 'function_call', includeIfNull: false) final ChatCompletionStreamMessageFunctionCall? functionCall, @JsonKey(name: 'tool_calls', includeIfNull: false) @@ -10136,29 +11248,36 @@ abstract class _ChatCompletionStreamResponseDelta Map json) = _$ChatCompletionStreamResponseDeltaImpl.fromJson; - @override - /// The contents of the chunk message. + @override @JsonKey(includeIfNull: false) String? get content; + + /// The refusal message generated by the model. @override + @JsonKey(includeIfNull: false) + String? get refusal; /// The name and arguments of a function that should be called, as generated by the model. + @override @JsonKey(name: 'function_call', includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get functionCall; - @override /// No Description + @override @JsonKey(name: 'tool_calls', includeIfNull: false) List? get toolCalls; - @override /// The role of the messages author. One of `system`, `user`, `assistant`, or `tool` (`function` is deprecated). + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionMessageRole? get role; + + /// Create a copy of ChatCompletionStreamResponseDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamResponseDeltaImplCopyWith< _$ChatCompletionStreamResponseDeltaImpl> get copyWith => throw _privateConstructorUsedError; @@ -10180,8 +11299,12 @@ mixin _$ChatCompletionStreamMessageFunctionCall { @JsonKey(includeIfNull: false) String? get arguments => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamMessageFunctionCall to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamMessageFunctionCallCopyWith< ChatCompletionStreamMessageFunctionCall> get copyWith => throw _privateConstructorUsedError; @@ -10212,6 +11335,8 @@ class _$ChatCompletionStreamMessageFunctionCallCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10255,6 +11380,8 @@ class __$$ChatCompletionStreamMessageFunctionCallImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageFunctionCallImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. 
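// Editor sketch (hedged): the new nullable `refusal` shows up both on the streamed
// delta (`ChatCompletionStreamResponseDelta.refusal`) and, as token logprobs, on
// `ChatCompletionStreamResponseChoiceLogprobs.refusal`. The loop below only uses
// fields shown in this diff; obtaining the stream itself (e.g. via
// OpenAIClient.createChatCompletionStream) is assumed from the package README.

import 'package:openai_dart/openai_dart.dart';

Future<String> collectStreamedAnswer(
    Stream<CreateChatCompletionStreamResponse> stream) async {
  final buffer = StringBuffer();
  await for (final chunk in stream) {
    if (chunk.choices.isEmpty) continue;
    final choice = chunk.choices.first;
    final delta = choice.delta;
    if (delta.refusal != null) {
      // New field: non-null when the model emits a refusal message.
      print('refusal: ${delta.refusal}');
    }
    // Token-level logprobs for refusal tokens, when logprobs were requested.
    final refusalLogprobs = choice.logprobs?.refusal;
    if (refusalLogprobs != null && refusalLogprobs.isNotEmpty) {
      print('refusal logprobs: ${refusalLogprobs.length} tokens');
    }
    buffer.write(delta.content ?? '');
  }
  return buffer.toString();
}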
@pragma('vm:prefer-inline') @override $Res call({ @@ -10312,11 +11439,13 @@ class _$ChatCompletionStreamMessageFunctionCallImpl other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< @@ -10345,18 +11474,20 @@ abstract class _ChatCompletionStreamMessageFunctionCall Map json) = _$ChatCompletionStreamMessageFunctionCallImpl.fromJson; - @override - /// The name of the function to call. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + @override @JsonKey(includeIfNull: false) String? get arguments; + + /// Create a copy of ChatCompletionStreamMessageFunctionCall + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamMessageFunctionCallImplCopyWith< _$ChatCompletionStreamMessageFunctionCallImpl> get copyWith => throw _privateConstructorUsedError; @@ -10388,8 +11519,12 @@ mixin _$ChatCompletionStreamMessageToolCallChunk { ChatCompletionStreamMessageFunctionCall? get function => throw _privateConstructorUsedError; + /// Serializes this ChatCompletionStreamMessageToolCallChunk to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ChatCompletionStreamMessageToolCallChunkCopyWith< ChatCompletionStreamMessageToolCallChunk> get copyWith => throw _privateConstructorUsedError; @@ -10428,6 +11563,8 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10456,6 +11593,8 @@ class _$ChatCompletionStreamMessageToolCallChunkCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ChatCompletionStreamMessageFunctionCallCopyWith<$Res>? get function { @@ -10503,6 +11642,8 @@ class __$$ChatCompletionStreamMessageToolCallChunkImplCopyWithImpl<$Res> $Res Function(_$ChatCompletionStreamMessageToolCallChunkImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -10587,11 +11728,13 @@ class _$ChatCompletionStreamMessageToolCallChunkImpl other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< @@ -10626,28 +11769,30 @@ abstract class _ChatCompletionStreamMessageToolCallChunk Map json) = _$ChatCompletionStreamMessageToolCallChunkImpl.fromJson; - @override - /// No Description - int get index; @override + int get index; /// The ID of the tool call. + @override @JsonKey(includeIfNull: false) String? get id; - @override /// The type of the tool. Currently, only `function` is supported. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ChatCompletionStreamMessageToolCallChunkType? get type; - @override /// The name and arguments of a function that should be called, as generated by the model. + @override @JsonKey(includeIfNull: false) ChatCompletionStreamMessageFunctionCall? get function; + + /// Create a copy of ChatCompletionStreamMessageToolCallChunk + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ChatCompletionStreamMessageToolCallChunkImplCopyWith< _$ChatCompletionStreamMessageToolCallChunkImpl> get copyWith => throw _privateConstructorUsedError; @@ -10671,8 +11816,17 @@ mixin _$CompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Breakdown of tokens used in a completion. + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? get completionTokensDetails => + throw _privateConstructorUsedError; + + /// Serializes this CompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10686,7 +11840,11 @@ abstract class $CompletionUsageCopyWith<$Res> { $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + @JsonKey(name: 'total_tokens') int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails}); + + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails; } /// @nodoc @@ -10699,12 +11857,15 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, + Object? 
completionTokensDetails = freezed, }) { return _then(_value.copyWith( completionTokens: freezed == completionTokens @@ -10719,8 +11880,27 @@ class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, + completionTokensDetails: freezed == completionTokensDetails + ? _value.completionTokensDetails + : completionTokensDetails // ignore: cast_nullable_to_non_nullable + as CompletionTokensDetails?, ) as $Val); } + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails { + if (_value.completionTokensDetails == null) { + return null; + } + + return $CompletionTokensDetailsCopyWith<$Res>( + _value.completionTokensDetails!, (value) { + return _then(_value.copyWith(completionTokensDetails: value) as $Val); + }); + } } /// @nodoc @@ -10734,7 +11914,12 @@ abstract class _$$CompletionUsageImplCopyWith<$Res> $Res call( {@JsonKey(name: 'completion_tokens') int? completionTokens, @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + @JsonKey(name: 'total_tokens') int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? completionTokensDetails}); + + @override + $CompletionTokensDetailsCopyWith<$Res>? get completionTokensDetails; } /// @nodoc @@ -10745,12 +11930,15 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> _$CompletionUsageImpl _value, $Res Function(_$CompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? completionTokens = freezed, Object? promptTokens = null, Object? totalTokens = null, + Object? completionTokensDetails = freezed, }) { return _then(_$CompletionUsageImpl( completionTokens: freezed == completionTokens @@ -10765,6 +11953,10 @@ class __$$CompletionUsageImplCopyWithImpl<$Res> ? _value.totalTokens : totalTokens // ignore: cast_nullable_to_non_nullable as int, + completionTokensDetails: freezed == completionTokensDetails + ? _value.completionTokensDetails + : completionTokensDetails // ignore: cast_nullable_to_non_nullable + as CompletionTokensDetails?, )); } } @@ -10775,7 +11967,9 @@ class _$CompletionUsageImpl extends _CompletionUsage { const _$CompletionUsageImpl( {@JsonKey(name: 'completion_tokens') required this.completionTokens, @JsonKey(name: 'prompt_tokens') required this.promptTokens, - @JsonKey(name: 'total_tokens') required this.totalTokens}) + @JsonKey(name: 'total_tokens') required this.totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + this.completionTokensDetails}) : super._(); factory _$CompletionUsageImpl.fromJson(Map json) => @@ -10796,9 +11990,14 @@ class _$CompletionUsageImpl extends _CompletionUsage { @JsonKey(name: 'total_tokens') final int totalTokens; + /// Breakdown of tokens used in a completion. + @override + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + final CompletionTokensDetails? 
completionTokensDetails; + @override String toString() { - return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; + return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens, completionTokensDetails: $completionTokensDetails)'; } @override @@ -10811,15 +12010,20 @@ class _$CompletionUsageImpl extends _CompletionUsage { (identical(other.promptTokens, promptTokens) || other.promptTokens == promptTokens) && (identical(other.totalTokens, totalTokens) || - other.totalTokens == totalTokens)); + other.totalTokens == totalTokens) && + (identical( + other.completionTokensDetails, completionTokensDetails) || + other.completionTokensDetails == completionTokensDetails)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); + int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, + totalTokens, completionTokensDetails); - @JsonKey(ignore: true) + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => @@ -10838,34 +12042,215 @@ abstract class _CompletionUsage extends CompletionUsage { const factory _CompletionUsage( {@JsonKey(name: 'completion_tokens') required final int? completionTokens, @JsonKey(name: 'prompt_tokens') required final int promptTokens, - @JsonKey(name: 'total_tokens') - required final int totalTokens}) = _$CompletionUsageImpl; + @JsonKey(name: 'total_tokens') required final int totalTokens, + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + final CompletionTokensDetails? + completionTokensDetails}) = _$CompletionUsageImpl; const _CompletionUsage._() : super._(); factory _CompletionUsage.fromJson(Map json) = _$CompletionUsageImpl.fromJson; - @override - /// Number of tokens in the generated completion. + @override @JsonKey(name: 'completion_tokens') int? get completionTokens; - @override /// Number of tokens in the prompt. + @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used in the request (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Breakdown of tokens used in a completion. @override - @JsonKey(ignore: true) + @JsonKey(name: 'completion_tokens_details', includeIfNull: false) + CompletionTokensDetails? get completionTokensDetails; + + /// Create a copy of CompletionUsage + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } +CompletionTokensDetails _$CompletionTokensDetailsFromJson( + Map json) { + return _CompletionTokensDetails.fromJson(json); +} + +/// @nodoc +mixin _$CompletionTokensDetails { + /// Tokens generated by the model for reasoning. + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? get reasoningTokens => throw _privateConstructorUsedError; + + /// Serializes this CompletionTokensDetails to a JSON map. 
+ Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CompletionTokensDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionTokensDetailsCopyWith<$Res> { + factory $CompletionTokensDetailsCopyWith(CompletionTokensDetails value, + $Res Function(CompletionTokensDetails) then) = + _$CompletionTokensDetailsCopyWithImpl<$Res, CompletionTokensDetails>; + @useResult + $Res call( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens}); +} + +/// @nodoc +class _$CompletionTokensDetailsCopyWithImpl<$Res, + $Val extends CompletionTokensDetails> + implements $CompletionTokensDetailsCopyWith<$Res> { + _$CompletionTokensDetailsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? reasoningTokens = freezed, + }) { + return _then(_value.copyWith( + reasoningTokens: freezed == reasoningTokens + ? _value.reasoningTokens + : reasoningTokens // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CompletionTokensDetailsImplCopyWith<$Res> + implements $CompletionTokensDetailsCopyWith<$Res> { + factory _$$CompletionTokensDetailsImplCopyWith( + _$CompletionTokensDetailsImpl value, + $Res Function(_$CompletionTokensDetailsImpl) then) = + __$$CompletionTokensDetailsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? reasoningTokens}); +} + +/// @nodoc +class __$$CompletionTokensDetailsImplCopyWithImpl<$Res> + extends _$CompletionTokensDetailsCopyWithImpl<$Res, + _$CompletionTokensDetailsImpl> + implements _$$CompletionTokensDetailsImplCopyWith<$Res> { + __$$CompletionTokensDetailsImplCopyWithImpl( + _$CompletionTokensDetailsImpl _value, + $Res Function(_$CompletionTokensDetailsImpl) _then) + : super(_value, _then); + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? reasoningTokens = freezed, + }) { + return _then(_$CompletionTokensDetailsImpl( + reasoningTokens: freezed == reasoningTokens + ? _value.reasoningTokens + : reasoningTokens // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CompletionTokensDetailsImpl extends _CompletionTokensDetails { + const _$CompletionTokensDetailsImpl( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + this.reasoningTokens}) + : super._(); + + factory _$CompletionTokensDetailsImpl.fromJson(Map json) => + _$$CompletionTokensDetailsImplFromJson(json); + + /// Tokens generated by the model for reasoning. + @override + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + final int? 
reasoningTokens; + + @override + String toString() { + return 'CompletionTokensDetails(reasoningTokens: $reasoningTokens)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CompletionTokensDetailsImpl && + (identical(other.reasoningTokens, reasoningTokens) || + other.reasoningTokens == reasoningTokens)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, reasoningTokens); + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> + get copyWith => __$$CompletionTokensDetailsImplCopyWithImpl< + _$CompletionTokensDetailsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CompletionTokensDetailsImplToJson( + this, + ); + } +} + +abstract class _CompletionTokensDetails extends CompletionTokensDetails { + const factory _CompletionTokensDetails( + {@JsonKey(name: 'reasoning_tokens', includeIfNull: false) + final int? reasoningTokens}) = _$CompletionTokensDetailsImpl; + const _CompletionTokensDetails._() : super._(); + + factory _CompletionTokensDetails.fromJson(Map json) = + _$CompletionTokensDetailsImpl.fromJson; + + /// Tokens generated by the model for reasoning. + @override + @JsonKey(name: 'reasoning_tokens', includeIfNull: false) + int? get reasoningTokens; + + /// Create a copy of CompletionTokensDetails + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CompletionTokensDetailsImplCopyWith<_$CompletionTokensDetailsImpl> + get copyWith => throw _privateConstructorUsedError; +} + CreateEmbeddingRequest _$CreateEmbeddingRequestFromJson( Map json) { return _CreateEmbeddingRequest.fromJson(json); @@ -10894,8 +12279,12 @@ mixin _$CreateEmbeddingRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateEmbeddingRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateEmbeddingRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -10928,6 +12317,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -10961,6 +12352,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingModelCopyWith<$Res> get model { @@ -10969,6 +12362,8 @@ class _$CreateEmbeddingRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. 
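// Editor sketch (hedged): `CompletionUsage` now optionally carries a
// `completionTokensDetails` breakdown whose `reasoningTokens` reports the tokens
// the model generated for reasoning. Constructor and field names below come
// straight from the generated signatures in this diff; the numbers are invented.

import 'package:openai_dart/openai_dart.dart';

void main() {
  const usage = CompletionUsage(
    completionTokens: 200,
    promptTokens: 50,
    totalTokens: 250,
    completionTokensDetails: CompletionTokensDetails(reasoningTokens: 120),
  );

  print(usage.completionTokensDetails?.reasoningTokens); // 120
  // Serializes to snake_case keys, e.g.
  // {"completion_tokens_details": {"reasoning_tokens": 120}, ...}.
  print(usage.toJson());
}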
@override @pragma('vm:prefer-inline') $EmbeddingInputCopyWith<$Res> get input { @@ -11010,6 +12405,8 @@ class __$$CreateEmbeddingRequestImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11103,12 +12500,14 @@ class _$CreateEmbeddingRequestImpl extends _CreateEmbeddingRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, model, input, encodingFormat, dimensions, user); - @JsonKey(ignore: true) + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> @@ -11137,33 +12536,35 @@ abstract class _CreateEmbeddingRequest extends CreateEmbeddingRequest { factory _CreateEmbeddingRequest.fromJson(Map json) = _$CreateEmbeddingRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_EmbeddingModelConverter() EmbeddingModel get model; - @override /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override @_EmbeddingInputConverter() EmbeddingInput get input; - @override /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + @override @JsonKey(name: 'encoding_format') EmbeddingEncodingFormat get encodingFormat; - @override /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + @override @JsonKey(includeIfNull: false) int? get dimensions; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateEmbeddingRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11222,6 +12623,8 @@ mixin _$EmbeddingModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingModel to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -11241,6 +12644,9 @@ class _$EmbeddingModelCopyWithImpl<$Res, $Val extends EmbeddingModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -11262,6 +12668,8 @@ class __$$EmbeddingModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11305,11 +12713,13 @@ class _$EmbeddingModelEnumerationImpl extends EmbeddingModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> @@ -11396,7 +12806,10 @@ abstract class EmbeddingModelEnumeration extends EmbeddingModel { @override EmbeddingModels get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingModelEnumerationImplCopyWith<_$EmbeddingModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11418,6 +12831,8 @@ class __$$EmbeddingModelStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingModelStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11461,11 +12876,13 @@ class _$EmbeddingModelStringImpl extends EmbeddingModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> @@ -11553,7 +12970,10 @@ abstract class EmbeddingModelString extends EmbeddingModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingModelStringImplCopyWith<_$EmbeddingModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11628,6 +13048,8 @@ mixin _$EmbeddingInput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingInput to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -11647,6 +13069,9 @@ class _$EmbeddingInputCopyWithImpl<$Res, $Val extends EmbeddingInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -11668,6 +13093,8 @@ class __$$EmbeddingInputListListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListListIntImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11718,12 +13145,14 @@ class _$EmbeddingInputListListIntImpl extends EmbeddingInputListListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> @@ -11822,7 +13251,10 @@ abstract class EmbeddingInputListListInt extends EmbeddingInput { @override List> get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListListIntImplCopyWith<_$EmbeddingInputListListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -11845,6 +13277,8 @@ class __$$EmbeddingInputListIntImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListIntImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -11895,12 +13329,14 @@ class _$EmbeddingInputListIntImpl extends EmbeddingInputListInt { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> @@ -11999,7 +13435,10 @@ abstract class EmbeddingInputListInt extends EmbeddingInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListIntImplCopyWith<_$EmbeddingInputListIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12023,6 +13462,8 @@ class __$$EmbeddingInputListStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputListStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -12073,12 +13514,14 @@ class _$EmbeddingInputListStringImpl extends EmbeddingInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> @@ -12177,7 +13620,10 @@ abstract class EmbeddingInputListString extends EmbeddingInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputListStringImplCopyWith<_$EmbeddingInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12199,6 +13645,8 @@ class __$$EmbeddingInputStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingInputStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12242,11 +13690,13 @@ class _$EmbeddingInputStringImpl extends EmbeddingInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> @@ -12346,7 +13796,10 @@ abstract class EmbeddingInputString extends EmbeddingInput { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingInputStringImplCopyWith<_$EmbeddingInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12372,8 +13825,12 @@ mixin _$CreateEmbeddingResponse { @JsonKey(includeIfNull: false) EmbeddingUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this CreateEmbeddingResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateEmbeddingResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12404,6 +13861,8 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12432,6 +13891,8 @@ class _$CreateEmbeddingResponseCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $EmbeddingUsageCopyWith<$Res>? get usage { @@ -12474,6 +13935,8 @@ class __$$CreateEmbeddingResponseImplCopyWithImpl<$Res> $Res Function(_$CreateEmbeddingResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12557,12 +14020,14 @@ class _$CreateEmbeddingResponseImpl extends _CreateEmbeddingResponse { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_data), model, object, usage); - @JsonKey(ignore: true) + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> @@ -12589,25 +14054,27 @@ abstract class _CreateEmbeddingResponse extends CreateEmbeddingResponse { factory _CreateEmbeddingResponse.fromJson(Map json) = _$CreateEmbeddingResponseImpl.fromJson; - @override - /// The list of embeddings generated by the model. - List get data; @override + List get data; /// The name of the model used to generate the embedding. - String get model; @override + String get model; /// The object type, which is always "list". - CreateEmbeddingResponseObject get object; @override + CreateEmbeddingResponseObject get object; /// The usage information for the request. + @override @JsonKey(includeIfNull: false) EmbeddingUsage? get usage; + + /// Create a copy of CreateEmbeddingResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12628,8 +14095,12 @@ mixin _$Embedding { /// The object type, which is always "embedding". EmbeddingObject get object => throw _privateConstructorUsedError; + /// Serializes this Embedding to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $EmbeddingCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -12657,6 +14128,8 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12680,6 +14153,8 @@ class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> ) as $Val); } + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $EmbeddingVectorCopyWith<$Res> get embedding { @@ -12714,6 +14189,8 @@ class __$$EmbeddingImplCopyWithImpl<$Res> _$EmbeddingImpl _value, $Res Function(_$EmbeddingImpl) _then) : super(_value, _then); + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -12779,11 +14256,13 @@ class _$EmbeddingImpl extends _Embedding { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, index, embedding, object); - @JsonKey(ignore: true) + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => @@ -12807,21 +14286,23 @@ abstract class _Embedding extends Embedding { factory _Embedding.fromJson(Map json) = _$EmbeddingImpl.fromJson; - @override - /// The index of the embedding in the list of embeddings. - int get index; @override + int get index; /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + @override @_EmbeddingVectorConverter() EmbeddingVector get embedding; - @override /// The object type, which is always "embedding". + @override EmbeddingObject get object; + + /// Create a copy of Embedding + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => throw _privateConstructorUsedError; } @@ -12880,6 +14361,8 @@ mixin _$EmbeddingVector { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this EmbeddingVector to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -12899,6 +14382,9 @@ class _$EmbeddingVectorCopyWithImpl<$Res, $Val extends EmbeddingVector> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -12920,6 +14406,8 @@ class __$$EmbeddingVectorListDoubleImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorListDoubleImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -12970,12 +14458,14 @@ class _$EmbeddingVectorListDoubleImpl extends EmbeddingVectorListDouble { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> @@ -13062,7 +14552,10 @@ abstract class EmbeddingVectorListDouble extends EmbeddingVector { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. 
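The `copyWith` members documented in these hunks ("Create a copy of ... with the given fields replaced by the non-null parameter values") are freezed's standard copy constructors: only the arguments you pass are replaced, and every other field keeps its current value. A short sketch using the `Embedding` model shown above (assumes an `Embedding` instance obtained elsewhere, e.g. from `CreateEmbeddingResponse.data`):

```dart
import 'package:openai_dart/openai_dart.dart';

/// Returns a copy of [embedding] with only `index` replaced; the `embedding`
/// vector and `object` fields keep their current values.
Embedding renumber(Embedding embedding, int newIndex) =>
    embedding.copyWith(index: newIndex);
```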
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingVectorListDoubleImplCopyWith<_$EmbeddingVectorListDoubleImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13085,6 +14578,8 @@ class __$$EmbeddingVectorStringImplCopyWithImpl<$Res> $Res Function(_$EmbeddingVectorStringImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13128,11 +14623,13 @@ class _$EmbeddingVectorStringImpl extends EmbeddingVectorString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> @@ -13219,7 +14716,10 @@ abstract class EmbeddingVectorString extends EmbeddingVector { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingVector + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingVectorStringImplCopyWith<_$EmbeddingVectorStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13238,8 +14738,12 @@ mixin _$EmbeddingUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this EmbeddingUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $EmbeddingUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13265,6 +14769,8 @@ class _$EmbeddingUsageCopyWithImpl<$Res, $Val extends EmbeddingUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13305,6 +14811,8 @@ class __$$EmbeddingUsageImplCopyWithImpl<$Res> _$EmbeddingUsageImpl _value, $Res Function(_$EmbeddingUsageImpl) _then) : super(_value, _then); + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13361,11 +14869,13 @@ class _$EmbeddingUsageImpl extends _EmbeddingUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => @@ -13390,18 +14900,20 @@ abstract class _EmbeddingUsage extends EmbeddingUsage { factory _EmbeddingUsage.fromJson(Map json) = _$EmbeddingUsageImpl.fromJson; - @override - /// The number of tokens used by the prompt. 
+ @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// The total number of tokens used by the request. + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of EmbeddingUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13422,7 +14934,12 @@ mixin _$CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @JsonKey(name: 'training_file') @@ -13433,9 +14950,9 @@ mixin _$CreateFineTuningJobRequest { FineTuningJobHyperparameters? get hyperparameters => throw _privateConstructorUsedError; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @JsonKey(includeIfNull: false) String? get suffix => throw _privateConstructorUsedError; @@ -13462,8 +14979,12 @@ mixin _$CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) int? get seed => throw _privateConstructorUsedError; + /// Serializes this CreateFineTuningJobRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateFineTuningJobRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -13501,6 +15022,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13544,6 +15067,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningModelCopyWith<$Res> get model { @@ -13552,6 +15077,8 @@ class _$CreateFineTuningJobRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res>? 
get hyperparameters { @@ -13602,6 +15129,8 @@ class __$$CreateFineTuningJobRequestImplCopyWithImpl<$Res> $Res Function(_$CreateFineTuningJobRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -13676,7 +15205,12 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. @override @@ -13688,9 +15222,9 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { @JsonKey(includeIfNull: false) final FineTuningJobHyperparameters? hyperparameters; - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @override @JsonKey(includeIfNull: false) final String? suffix; @@ -13752,7 +15286,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { (identical(other.seed, seed) || other.seed == seed)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -13764,7 +15298,9 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { const DeepCollectionEquality().hash(_integrations), seed); - @JsonKey(ignore: true) + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> @@ -13797,36 +15333,39 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { factory _CreateFineTuningJobRequest.fromJson(Map json) = _$CreateFineTuningJobRequestImpl.fromJson; - @override - /// The name of the model to fine-tune. You can select one of the /// [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + @override @_FineTuningModelConverter() FineTuningModel get model; - @override /// The ID of an uploaded file that contains training data. /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// Your dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose + /// `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the + /// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(name: 'training_file') String get trainingFile; - @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(includeIfNull: false) FineTuningJobHyperparameters? get hyperparameters; - @override - /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// A string of up to 64 characters that will be added to your fine-tuned model name. /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + @override @JsonKey(includeIfNull: false) String? get suffix; - @override /// The ID of an uploaded file that contains validation data. /// @@ -13838,21 +15377,25 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. /// /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override @JsonKey(name: 'validation_file', includeIfNull: false) String? get validationFile; - @override /// A list of integrations to enable for your fine-tuning job. + @override @JsonKey(includeIfNull: false) List? get integrations; - @override /// The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. /// If a seed is not specified, one will be generated for you. + @override @JsonKey(includeIfNull: false) int? get seed; + + /// Create a copy of CreateFineTuningJobRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -13911,6 +15454,8 @@ mixin _$FineTuningModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this FineTuningModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -13930,6 +15475,9 @@ class _$FineTuningModelCopyWithImpl<$Res, $Val extends FineTuningModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -13952,6 +15500,8 @@ class __$$FineTuningModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. 
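For `CreateFineTuningJobRequest`, the regenerated doc comments raise the `suffix` limit from 18 to 64 characters, switch the example model name to `ft:gpt-4o-mini:...`, and spell out that the training file must be a JSONL file uploaded with the purpose `fine-tune`. A hedged sketch of building such a request (field names come from the generated class above; the file ID and suffix are placeholders, and the `FineTuningModel` value is taken as a parameter because its union constructors are not shown in this hunk):

```dart
import 'package:openai_dart/openai_dart.dart';

/// Builds a fine-tuning request. `suffix` may now be up to 64 characters and
/// is appended to the resulting model name
/// (e.g. `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`);
/// `trainingFile` must reference a JSONL file uploaded with purpose `fine-tune`.
CreateFineTuningJobRequest buildRequest(FineTuningModel model) =>
    CreateFineTuningJobRequest(
      model: model,
      trainingFile: 'file-abc123', // placeholder uploaded-file ID
      suffix: 'custom-model-name',
    );
```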
@pragma('vm:prefer-inline') @override $Res call({ @@ -13996,11 +15546,13 @@ class _$FineTuningModelEnumerationImpl extends FineTuningModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> @@ -14087,7 +15639,10 @@ abstract class FineTuningModelEnumeration extends FineTuningModel { @override FineTuningModels get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningModelEnumerationImplCopyWith<_$FineTuningModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14110,6 +15665,8 @@ class __$$FineTuningModelStringImplCopyWithImpl<$Res> $Res Function(_$FineTuningModelStringImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14153,11 +15710,13 @@ class _$FineTuningModelStringImpl extends FineTuningModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> @@ -14244,7 +15803,10 @@ abstract class FineTuningModelString extends FineTuningModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningModelStringImplCopyWith<_$FineTuningModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14311,8 +15873,12 @@ mixin _$FineTuningJob { List? get integrations => throw _privateConstructorUsedError; + /// Serializes this FineTuningJob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -14355,6 +15921,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14438,6 +16006,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> ) as $Val); } + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobErrorCopyWith<$Res>? 
get error { @@ -14450,6 +16020,8 @@ class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> }); } + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters { @@ -14500,6 +16072,8 @@ class __$$FineTuningJobImplCopyWithImpl<$Res> _$FineTuningJobImpl _value, $Res Function(_$FineTuningJobImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14733,7 +16307,7 @@ class _$FineTuningJobImpl extends _FineTuningJob { .equals(other._integrations, _integrations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -14753,7 +16327,9 @@ class _$FineTuningJobImpl extends _FineTuningJob { validationFile, const DeepCollectionEquality().hash(_integrations)); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => @@ -14790,77 +16366,79 @@ abstract class _FineTuningJob extends FineTuningJob { factory _FineTuningJob.fromJson(Map json) = _$FineTuningJobImpl.fromJson; - @override - /// The object identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. - FineTuningJobError? get error; @override + FineTuningJobError? get error; /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'fine_tuned_model') String? get fineTunedModel; - @override /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'finished_at') int? get finishedAt; - @override /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - FineTuningJobHyperparameters get hyperparameters; @override + FineTuningJobHyperparameters get hyperparameters; /// The base model that is being fine-tuned. - String get model; @override + String get model; /// The object type, which is always "fine_tuning.job". - FineTuningJobObject get object; @override + FineTuningJobObject get object; /// The organization that owns the fine-tuning job. + @override @JsonKey(name: 'organization_id') String get organizationId; - @override /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'result_files') List get resultFiles; - @override /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. 
- FineTuningJobStatus get status; @override + FineTuningJobStatus get status; /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @override @JsonKey(name: 'trained_tokens') int? get trainedTokens; - @override /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'training_file') String get trainingFile; - @override /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override @JsonKey(name: 'validation_file') String? get validationFile; - @override /// A list of integrations to enable for this fine-tuning job. + @override @JsonKey(includeIfNull: false) List? get integrations; + + /// Create a copy of FineTuningJob + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -14880,8 +16458,12 @@ mixin _$FineTuningIntegration { /// to your run, and set a default entity (team, username, etc) to be associated with your run. FineTuningIntegrationWandb get wandb => throw _privateConstructorUsedError; + /// Serializes this FineTuningIntegration to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningIntegrationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -14908,6 +16490,8 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -14926,6 +16510,8 @@ class _$FineTuningIntegrationCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningIntegrationWandbCopyWith<$Res> get wandb { @@ -14959,6 +16545,8 @@ class __$$FineTuningIntegrationImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15011,11 +16599,13 @@ class _$FineTuningIntegrationImpl extends _FineTuningIntegration { (identical(other.wandb, wandb) || other.wandb == wandb)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, wandb); - @JsonKey(ignore: true) + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> @@ -15040,18 +16630,20 @@ abstract class _FineTuningIntegration extends FineTuningIntegration { factory _FineTuningIntegration.fromJson(Map json) = _$FineTuningIntegrationImpl.fromJson; - @override - /// The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - FineTuningIntegrationType get type; @override + FineTuningIntegrationType get type; /// The settings for your integration with Weights and Biases. This payload specifies the project that /// metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags /// to your run, and set a default entity (team, username, etc) to be associated with your run. + @override FineTuningIntegrationWandb get wandb; + + /// Create a copy of FineTuningIntegration + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningIntegrationImplCopyWith<_$FineTuningIntegrationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15080,8 +16672,12 @@ mixin _$FineTuningIntegrationWandb { @JsonKey(includeIfNull: false) List? get tags => throw _privateConstructorUsedError; + /// Serializes this FineTuningIntegrationWandb to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningIntegrationWandbCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15111,6 +16707,8 @@ class _$FineTuningIntegrationWandbCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15166,6 +16764,8 @@ class __$$FineTuningIntegrationWandbImplCopyWithImpl<$Res> $Res Function(_$FineTuningIntegrationWandbImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15257,12 +16857,14 @@ class _$FineTuningIntegrationWandbImpl extends _FineTuningIntegrationWandb { const DeepCollectionEquality().equals(other._tags, _tags)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, project, name, entity, const DeepCollectionEquality().hash(_tags)); - @JsonKey(ignore: true) + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> @@ -15289,29 +16891,31 @@ abstract class _FineTuningIntegrationWandb extends FineTuningIntegrationWandb { factory _FineTuningIntegrationWandb.fromJson(Map json) = _$FineTuningIntegrationWandbImpl.fromJson; - @override - /// The name of the project that the new run will be created under. - String get project; @override + String get project; /// A display name to set for the run. If not set, we will use the Job ID as the name. 
+ @override @JsonKey(includeIfNull: false) String? get name; - @override /// The entity to use for the run. This allows you to set the team or username of the WandB user that you would /// like associated with the run. If not set, the default entity for the registered WandB API key is used. + @override @JsonKey(includeIfNull: false) String? get entity; - @override /// A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some /// default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + @override @JsonKey(includeIfNull: false) List? get tags; + + /// Create a copy of FineTuningIntegrationWandb + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningIntegrationWandbImplCopyWith<_$FineTuningIntegrationWandbImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15331,8 +16935,12 @@ mixin _$FineTuningJobError { /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. String? get param => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15356,6 +16964,8 @@ class _$FineTuningJobErrorCopyWithImpl<$Res, $Val extends FineTuningJobError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15399,6 +17009,8 @@ class __$$FineTuningJobErrorImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobErrorImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15460,11 +17072,13 @@ class _$FineTuningJobErrorImpl extends _FineTuningJobError { (identical(other.param, param) || other.param == param)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message, param); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => @@ -15489,20 +17103,22 @@ abstract class _FineTuningJobError extends FineTuningJobError { factory _FineTuningJobError.fromJson(Map json) = _$FineTuningJobErrorImpl.fromJson; - @override - /// A machine-readable error code. - String get code; @override + String get code; /// A human-readable error message. - String get message; @override + String get message; /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + @override String? get param; + + /// Create a copy of FineTuningJobError + /// with the given fields replaced by the non-null parameter values. 
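`FineTuningJob.error` (a `FineTuningJobError` with `code`, `message`, and an optional `param`) is only populated for jobs that have failed, as the doc comments above note. A small sketch of surfacing it (assumes a `FineTuningJob` fetched elsewhere):

```dart
import 'package:openai_dart/openai_dart.dart';

/// Prints the failure details of [job], if any. `param` is usually
/// `training_file` or `validation_file` and may be null when the failure
/// was not parameter-specific.
void logFailure(FineTuningJob job) {
  final error = job.error;
  if (error == null) return;
  final param = error.param != null ? ' (param: ${error.param})' : '';
  print('Job ${job.id} failed: ${error.code} - ${error.message}$param');
}
```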
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -15514,14 +17130,20 @@ FineTuningJobHyperparameters _$FineTuningJobHyperparametersFromJson( /// @nodoc mixin _$FineTuningJobHyperparameters { - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobHyperparameters to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobHyperparametersCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -15553,6 +17175,8 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15566,6 +17190,8 @@ class _$FineTuningJobHyperparametersCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $FineTuningNEpochsCopyWith<$Res> get nEpochs { @@ -15603,6 +17229,8 @@ class __$$FineTuningJobHyperparametersImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobHyperparametersImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15630,8 +17258,10 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { Map json) => _$$FineTuningJobHyperparametersImplFromJson(json); - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @override @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') @@ -15650,11 +17280,13 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { (identical(other.nEpochs, nEpochs) || other.nEpochs == nEpochs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, nEpochs); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobHyperparametersImplCopyWith< @@ -15682,15 +17314,19 @@ abstract class _FineTuningJobHyperparameters factory _FineTuningJobHyperparameters.fromJson(Map json) = _$FineTuningJobHyperparametersImpl.fromJson; + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + /// manually, we support any number between 1 and 50 epochs. @override - - /// The number of epochs to train the model for. An epoch refers to one - /// full cycle through the training dataset. @_FineTuningNEpochsConverter() @JsonKey(name: 'n_epochs') FineTuningNEpochs get nEpochs; + + /// Create a copy of FineTuningJobHyperparameters + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobHyperparametersImplCopyWith< _$FineTuningJobHyperparametersImpl> get copyWith => throw _privateConstructorUsedError; @@ -15750,6 +17386,8 @@ mixin _$FineTuningNEpochs { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this FineTuningNEpochs to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -15769,6 +17407,9 @@ class _$FineTuningNEpochsCopyWithImpl<$Res, $Val extends FineTuningNEpochs> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -15791,6 +17432,8 @@ class __$$FineTuningNEpochsEnumerationImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -15835,11 +17478,13 @@ class _$FineTuningNEpochsEnumerationImpl extends FineTuningNEpochsEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsEnumerationImplCopyWith< @@ -15928,7 +17573,10 @@ abstract class FineTuningNEpochsEnumeration extends FineTuningNEpochs { @override FineTuningNEpochsOptions get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningNEpochsEnumerationImplCopyWith< _$FineTuningNEpochsEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -15951,6 +17599,8 @@ class __$$FineTuningNEpochsIntImplCopyWithImpl<$Res> $Res Function(_$FineTuningNEpochsIntImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -15994,11 +17644,13 @@ class _$FineTuningNEpochsIntImpl extends FineTuningNEpochsInt { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> @@ -16086,7 +17738,10 @@ abstract class FineTuningNEpochsInt extends FineTuningNEpochs { @override int get value; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningNEpochs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningNEpochsIntImplCopyWith<_$FineTuningNEpochsIntImpl> get copyWith => throw _privateConstructorUsedError; } @@ -16109,8 +17764,12 @@ mixin _$ListPaginatedFineTuningJobsResponse { ListPaginatedFineTuningJobsResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListPaginatedFineTuningJobsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListPaginatedFineTuningJobsResponseCopyWith< ListPaginatedFineTuningJobsResponse> get copyWith => throw _privateConstructorUsedError; @@ -16141,6 +17800,8 @@ class _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16190,6 +17851,8 @@ class __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl<$Res> $Res Function(_$ListPaginatedFineTuningJobsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16264,12 +17927,14 @@ class _$ListPaginatedFineTuningJobsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), hasMore, object); - @JsonKey(ignore: true) + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListPaginatedFineTuningJobsResponseImplCopyWith< @@ -16298,21 +17963,23 @@ abstract class _ListPaginatedFineTuningJobsResponse Map json) = _$ListPaginatedFineTuningJobsResponseImpl.fromJson; - @override - /// The list of fine-tuning jobs. - List get data; @override + List get data; /// Whether there are more fine-tuning jobs to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; - @override /// The object type, which is always "list". 
+ @override ListPaginatedFineTuningJobsResponseObject get object; + + /// Create a copy of ListPaginatedFineTuningJobsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListPaginatedFineTuningJobsResponseImplCopyWith< _$ListPaginatedFineTuningJobsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16332,8 +17999,12 @@ mixin _$ListFineTuningJobEventsResponse { ListFineTuningJobEventsResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this ListFineTuningJobEventsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListFineTuningJobEventsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16362,6 +18033,8 @@ class _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16405,6 +18078,8 @@ class __$$ListFineTuningJobEventsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobEventsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16466,12 +18141,14 @@ class _$ListFineTuningJobEventsResponseImpl (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_data), object); - @JsonKey(ignore: true) + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobEventsResponseImplCopyWith< @@ -16498,16 +18175,18 @@ abstract class _ListFineTuningJobEventsResponse factory _ListFineTuningJobEventsResponse.fromJson(Map json) = _$ListFineTuningJobEventsResponseImpl.fromJson; - @override - /// The list of fine-tuning job events. - List get data; @override + List get data; /// The object type, which is always "list". + @override ListFineTuningJobEventsResponseObject get object; + + /// Create a copy of ListFineTuningJobEventsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListFineTuningJobEventsResponseImplCopyWith< _$ListFineTuningJobEventsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16539,8 +18218,12 @@ mixin _$ListFineTuningJobCheckpointsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListFineTuningJobCheckpointsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $ListFineTuningJobCheckpointsResponseCopyWith< ListFineTuningJobCheckpointsResponse> get copyWith => throw _privateConstructorUsedError; @@ -16573,6 +18256,8 @@ class _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16634,6 +18319,8 @@ class __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl<$Res> $Res Function(_$ListFineTuningJobCheckpointsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16732,7 +18419,7 @@ class _$ListFineTuningJobCheckpointsResponseImpl (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -16742,7 +18429,9 @@ class _$ListFineTuningJobCheckpointsResponseImpl lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListFineTuningJobCheckpointsResponseImplCopyWith< @@ -16774,31 +18463,33 @@ abstract class _ListFineTuningJobCheckpointsResponse Map json) = _$ListFineTuningJobCheckpointsResponseImpl.fromJson; - @override - /// The list of fine-tuning job checkpoints. - List get data; @override + List get data; /// The object type, which is always "list". - ListFineTuningJobCheckpointsResponseObject get object; @override + ListFineTuningJobCheckpointsResponseObject get object; /// The ID of the first checkpoint in the list. + @override @JsonKey(name: 'first_id', includeIfNull: false) String? get firstId; - @override /// The ID of the last checkpoint in the list. + @override @JsonKey(name: 'last_id', includeIfNull: false) String? get lastId; - @override /// Whether there are more checkpoints to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListFineTuningJobCheckpointsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListFineTuningJobCheckpointsResponseImplCopyWith< _$ListFineTuningJobCheckpointsResponseImpl> get copyWith => throw _privateConstructorUsedError; @@ -16826,8 +18517,12 @@ mixin _$FineTuningJobEvent { /// The object type, which is always "fine_tuning.job.event". FineTuningJobEventObject get object => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -16856,6 +18551,8 @@ class _$FineTuningJobEventCopyWithImpl<$Res, $Val extends FineTuningJobEvent> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -16914,6 +18611,8 @@ class __$$FineTuningJobEventImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobEventImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17001,12 +18700,14 @@ class _$FineTuningJobEventImpl extends _FineTuningJobEvent { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, createdAt, level, message, object); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => @@ -17034,29 +18735,31 @@ abstract class _FineTuningJobEvent extends FineTuningJobEvent { factory _FineTuningJobEvent.fromJson(Map json) = _$FineTuningJobEventImpl.fromJson; - @override - /// The event identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the event was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The log level of the event. - FineTuningJobEventLevel get level; @override + FineTuningJobEventLevel get level; /// The message of the event. - String get message; @override + String get message; /// The object type, which is always "fine_tuning.job.event". + @override FineTuningJobEventObject get object; + + /// Create a copy of FineTuningJobEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17095,8 +18798,12 @@ mixin _$FineTuningJobCheckpoint { FineTuningJobCheckpointObject get object => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobCheckpoint to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCheckpointCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17131,6 +18838,8 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17174,6 +18883,8 @@ class _$FineTuningJobCheckpointCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics { @@ -17217,6 +18928,8 @@ class __$$FineTuningJobCheckpointImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17334,12 +19047,14 @@ class _$FineTuningJobCheckpointImpl extends _FineTuningJobCheckpoint { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, createdAt, fineTunedModelCheckpoint, stepNumber, metrics, fineTuningJobId, object); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> @@ -17371,40 +19086,42 @@ abstract class _FineTuningJobCheckpoint extends FineTuningJobCheckpoint { factory _FineTuningJobCheckpoint.fromJson(Map json) = _$FineTuningJobCheckpointImpl.fromJson; - @override - /// The checkpoint identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) for when the checkpoint was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the fine-tuned checkpoint model that is created. + @override @JsonKey(name: 'fine_tuned_model_checkpoint') String get fineTunedModelCheckpoint; - @override /// The step number that the checkpoint was created at. + @override @JsonKey(name: 'step_number') int get stepNumber; - @override /// Metrics at the step number during the fine-tuning job. - FineTuningJobCheckpointMetrics get metrics; @override + FineTuningJobCheckpointMetrics get metrics; /// The name of the fine-tuning job that this checkpoint was created from. + @override @JsonKey(name: 'fine_tuning_job_id') String get fineTuningJobId; - @override /// The object type, which is always "fine_tuning.job.checkpoint". + @override FineTuningJobCheckpointObject get object; + + /// Create a copy of FineTuningJobCheckpoint + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> get copyWith => throw _privateConstructorUsedError; } @@ -17444,8 +19161,12 @@ mixin _$FineTuningJobCheckpointMetrics { @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy => throw _privateConstructorUsedError; + /// Serializes this FineTuningJobCheckpointMetrics to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $FineTuningJobCheckpointMetricsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17483,6 +19204,8 @@ class _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17560,6 +19283,8 @@ class __$$FineTuningJobCheckpointMetricsImplCopyWithImpl<$Res> $Res Function(_$FineTuningJobCheckpointMetricsImpl) _then) : super(_value, _then); + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17688,7 +19413,7 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidMeanTokenAccuracy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -17700,7 +19425,9 @@ class _$FineTuningJobCheckpointMetricsImpl fullValidLoss, fullValidMeanTokenAccuracy); - @JsonKey(ignore: true) + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$FineTuningJobCheckpointMetricsImplCopyWith< @@ -17738,43 +19465,45 @@ abstract class _FineTuningJobCheckpointMetrics factory _FineTuningJobCheckpointMetrics.fromJson(Map json) = _$FineTuningJobCheckpointMetricsImpl.fromJson; - @override - /// The step number that the metrics were recorded at. + @override @JsonKey(includeIfNull: false) double? get step; - @override /// The training loss at the step number. + @override @JsonKey(name: 'train_loss', includeIfNull: false) double? get trainLoss; - @override /// The training mean token accuracy at the step number. + @override @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) double? get trainMeanTokenAccuracy; - @override /// The validation loss at the step number. + @override @JsonKey(name: 'valid_loss', includeIfNull: false) double? get validLoss; - @override /// The validation mean token accuracy at the step number. + @override @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) double? get validMeanTokenAccuracy; - @override /// The full validation loss at the step number. + @override @JsonKey(name: 'full_valid_loss', includeIfNull: false) double? get fullValidLoss; - @override /// The full validation mean token accuracy at the step number. + @override @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) double? get fullValidMeanTokenAccuracy; + + /// Create a copy of FineTuningJobCheckpointMetrics + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$FineTuningJobCheckpointMetricsImplCopyWith< _$FineTuningJobCheckpointMetricsImpl> get copyWith => throw _privateConstructorUsedError; @@ -17822,8 +19551,12 @@ mixin _$CreateImageRequest { @JsonKey(includeIfNull: false) String? get user => throw _privateConstructorUsedError; + /// Serializes this CreateImageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateImageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -17869,6 +19602,8 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -17917,6 +19652,8 @@ class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> ) as $Val); } + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateImageRequestModelCopyWith<$Res>? get model { @@ -17972,6 +19709,8 @@ class __$$CreateImageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18116,12 +19855,14 @@ class _$CreateImageRequestImpl extends _CreateImageRequest { (identical(other.user, user) || other.user == user)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, prompt, model, n, quality, responseFormat, size, style, user); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => @@ -18164,52 +19905,54 @@ abstract class _CreateImageRequest extends CreateImageRequest { factory _CreateImageRequest.fromJson(Map json) = _$CreateImageRequestImpl.fromJson; - @override - /// A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. - String get prompt; @override + String get prompt; /// The model to use for image generation. + @override @_CreateImageRequestModelConverter() @JsonKey(includeIfNull: false) CreateImageRequestModel? get model; - @override /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + @override @JsonKey(includeIfNull: false) int? get n; - @override /// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - ImageQuality get quality; @override + ImageQuality get quality; /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + @override @JsonKey( name: 'response_format', includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageResponseFormat? get responseFormat; - @override /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageSize? get size; - @override /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. 
Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) ImageStyle? get style; - @override /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override @JsonKey(includeIfNull: false) String? get user; + + /// Create a copy of CreateImageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -18272,6 +20015,8 @@ mixin _$CreateImageRequestModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateImageRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -18292,6 +20037,9 @@ class _$CreateImageRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -18314,6 +20062,8 @@ class __$$CreateImageRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18360,11 +20110,13 @@ class _$CreateImageRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelEnumerationImplCopyWith< @@ -18454,7 +20206,10 @@ abstract class CreateImageRequestModelEnumeration @override ImageModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestModelEnumerationImplCopyWith< _$CreateImageRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -18480,6 +20235,8 @@ class __$$CreateImageRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateImageRequestModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18525,11 +20282,13 @@ class _$CreateImageRequestModelStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateImageRequestModelStringImplCopyWith< @@ -18617,7 +20376,10 @@ abstract class CreateImageRequestModelString extends CreateImageRequestModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateImageRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateImageRequestModelStringImplCopyWith< _$CreateImageRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -18635,8 +20397,12 @@ mixin _$ImagesResponse { /// The list of images generated by the model. List get data => throw _privateConstructorUsedError; + /// Serializes this ImagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ImagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18660,6 +20426,8 @@ class _$ImagesResponseCopyWithImpl<$Res, $Val extends ImagesResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18698,6 +20466,8 @@ class __$$ImagesResponseImplCopyWithImpl<$Res> _$ImagesResponseImpl _value, $Res Function(_$ImagesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18757,12 +20527,14 @@ class _$ImagesResponseImpl extends _ImagesResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, created, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => @@ -18786,16 +20558,18 @@ abstract class _ImagesResponse extends ImagesResponse { factory _ImagesResponse.fromJson(Map json) = _$ImagesResponseImpl.fromJson; - @override - /// The Unix timestamp (in seconds) when the image was created. - int get created; @override + int get created; /// The list of images generated by the model. + @override List get data; + + /// Create a copy of ImagesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -18818,8 +20592,12 @@ mixin _$Image { @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt => throw _privateConstructorUsedError; + /// Serializes this Image to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $ImageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -18845,6 +20623,8 @@ class _$ImageCopyWithImpl<$Res, $Val extends Image> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18891,6 +20671,8 @@ class __$$ImageImplCopyWithImpl<$Res> _$ImageImpl _value, $Res Function(_$ImageImpl) _then) : super(_value, _then); + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -18959,11 +20741,13 @@ class _$ImageImpl extends _Image { other.revisedPrompt == revisedPrompt)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, b64Json, url, revisedPrompt); - @JsonKey(ignore: true) + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ImageImplCopyWith<_$ImageImpl> get copyWith => @@ -18987,23 +20771,25 @@ abstract class _Image extends Image { factory _Image.fromJson(Map json) = _$ImageImpl.fromJson; - @override - /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @override @JsonKey(name: 'b64_json', includeIfNull: false) String? get b64Json; - @override /// The URL of the generated image, if `response_format` is `url` (default). + @override @JsonKey(includeIfNull: false) String? get url; - @override /// The prompt that was used to generate the image, if there was any revision to the prompt. + @override @JsonKey(name: 'revised_prompt', includeIfNull: false) String? get revisedPrompt; + + /// Create a copy of Image + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ImageImplCopyWith<_$ImageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19027,8 +20813,12 @@ mixin _$Model { @JsonKey(name: 'owned_by') String get ownedBy => throw _privateConstructorUsedError; + /// Serializes this Model to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModelCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19054,6 +20844,8 @@ class _$ModelCopyWithImpl<$Res, $Val extends Model> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19105,6 +20897,8 @@ class __$$ModelImplCopyWithImpl<$Res> _$ModelImpl _value, $Res Function(_$ModelImpl) _then) : super(_value, _then); + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -19180,11 +20974,13 @@ class _$ModelImpl extends _Model { (identical(other.ownedBy, ownedBy) || other.ownedBy == ownedBy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, created, object, ownedBy); - @JsonKey(ignore: true) + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModelImplCopyWith<_$ModelImpl> get copyWith => @@ -19208,25 +21004,27 @@ abstract class _Model extends Model { factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; - @override - /// The model identifier, which can be referenced in the API endpoints. - String get id; @override + String get id; /// The Unix timestamp (in seconds) when the model was created. - int get created; @override + int get created; /// The object type, which is always "model". - ModelObject get object; @override + ModelObject get object; /// The organization that owns the model. + @override @JsonKey(name: 'owned_by') String get ownedBy; + + /// Create a copy of Model + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModelImplCopyWith<_$ModelImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19243,8 +21041,12 @@ mixin _$ListModelsResponse { /// The list of models. List get data => throw _privateConstructorUsedError; + /// Serializes this ListModelsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListModelsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19268,6 +21070,8 @@ class _$ListModelsResponseCopyWithImpl<$Res, $Val extends ListModelsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19306,6 +21110,8 @@ class __$$ListModelsResponseImplCopyWithImpl<$Res> $Res Function(_$ListModelsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19365,12 +21171,14 @@ class _$ListModelsResponseImpl extends _ListModelsResponse { const DeepCollectionEquality().equals(other._data, _data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, object, const DeepCollectionEquality().hash(_data)); - @JsonKey(ignore: true) + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => @@ -19394,16 +21202,18 @@ abstract class _ListModelsResponse extends ListModelsResponse { factory _ListModelsResponse.fromJson(Map json) = _$ListModelsResponseImpl.fromJson; - @override - /// The object type, which is always "list". 
- ListModelsResponseObject get object; @override + ListModelsResponseObject get object; /// The list of models. + @override List get data; + + /// Create a copy of ListModelsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19423,8 +21233,12 @@ mixin _$DeleteModelResponse { /// The object type, which is always "model". String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteModelResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteModelResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19448,6 +21262,8 @@ class _$DeleteModelResponseCopyWithImpl<$Res, $Val extends DeleteModelResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19491,6 +21307,8 @@ class __$$DeleteModelResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteModelResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19552,11 +21370,13 @@ class _$DeleteModelResponseImpl extends _DeleteModelResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => @@ -19581,20 +21401,22 @@ abstract class _DeleteModelResponse extends DeleteModelResponse { factory _DeleteModelResponse.fromJson(Map json) = _$DeleteModelResponseImpl.fromJson; - @override - /// The model identifier. - String get id; @override + String get id; /// Whether the model was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always "model". + @override String get object; + + /// Create a copy of DeleteModelResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19617,8 +21439,12 @@ mixin _$CreateModerationRequest { @_ModerationInputConverter() ModerationInput get input => throw _privateConstructorUsedError; + /// Serializes this CreateModerationRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateModerationRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -19650,6 +21476,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19668,6 +21496,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationModelCopyWith<$Res>? get model { @@ -19680,6 +21510,8 @@ class _$CreateModerationRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationInputCopyWith<$Res> get input { @@ -19720,6 +21552,8 @@ class __$$CreateModerationRequestImplCopyWithImpl<$Res> $Res Function(_$CreateModerationRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19779,11 +21613,13 @@ class _$CreateModerationRequestImpl extends _CreateModerationRequest { (identical(other.input, input) || other.input == input)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, model, input); - @JsonKey(ignore: true) + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> @@ -19810,21 +21646,23 @@ abstract class _CreateModerationRequest extends CreateModerationRequest { factory _CreateModerationRequest.fromJson(Map json) = _$CreateModerationRequestImpl.fromJson; - @override - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. /// /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + @override @_ModerationModelConverter() @JsonKey(includeIfNull: false) ModerationModel? get model; - @override /// The input text to classify + @override @_ModerationInputConverter() ModerationInput get input; + + /// Create a copy of CreateModerationRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -19883,6 +21721,8 @@ mixin _$ModerationModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModerationModel to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -19902,6 +21742,9 @@ class _$ModerationModelCopyWithImpl<$Res, $Val extends ModerationModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -19924,6 +21767,8 @@ class __$$ModerationModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ModerationModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -19968,11 +21813,13 @@ class _$ModerationModelEnumerationImpl extends ModerationModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> @@ -20059,7 +21906,10 @@ abstract class ModerationModelEnumeration extends ModerationModel { @override ModerationModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationModelEnumerationImplCopyWith<_$ModerationModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20082,6 +21932,8 @@ class __$$ModerationModelStringImplCopyWithImpl<$Res> $Res Function(_$ModerationModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20125,11 +21977,13 @@ class _$ModerationModelStringImpl extends ModerationModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> @@ -20216,7 +22070,10 @@ abstract class ModerationModelString extends ModerationModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationModelStringImplCopyWith<_$ModerationModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20275,6 +22132,8 @@ mixin _$ModerationInput { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModerationInput to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; } @@ -20294,6 +22153,9 @@ class _$ModerationInputCopyWithImpl<$Res, $Val extends ModerationInput> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -20315,6 +22177,8 @@ class __$$ModerationInputListStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputListStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20365,12 +22229,14 @@ class _$ModerationInputListStringImpl extends ModerationInputListString { const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> @@ -20457,7 +22323,10 @@ abstract class ModerationInputListString extends ModerationInput { @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationInputListStringImplCopyWith<_$ModerationInputListStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20480,6 +22349,8 @@ class __$$ModerationInputStringImplCopyWithImpl<$Res> $Res Function(_$ModerationInputStringImpl) _then) : super(_value, _then); + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20523,11 +22394,13 @@ class _$ModerationInputStringImpl extends ModerationInputString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> @@ -20614,7 +22487,10 @@ abstract class ModerationInputString extends ModerationInput { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ModerationInput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationInputStringImplCopyWith<_$ModerationInputStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20635,8 +22511,12 @@ mixin _$CreateModerationResponse { /// A list of moderation objects. List get results => throw _privateConstructorUsedError; + /// Serializes this CreateModerationResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateModerationResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20661,6 +22541,8 @@ class _$CreateModerationResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20707,6 +22589,8 @@ class __$$CreateModerationResponseImplCopyWithImpl<$Res> $Res Function(_$CreateModerationResponseImpl) _then) : super(_value, _then); + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20778,12 +22662,14 @@ class _$CreateModerationResponseImpl extends _CreateModerationResponse { const DeepCollectionEquality().equals(other._results, _results)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, id, model, const DeepCollectionEquality().hash(_results)); - @JsonKey(ignore: true) + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> @@ -20809,20 +22695,22 @@ abstract class _CreateModerationResponse extends CreateModerationResponse { factory _CreateModerationResponse.fromJson(Map json) = _$CreateModerationResponseImpl.fromJson; - @override - /// The unique identifier for the moderation request. - String get id; @override + String get id; /// The model used to generate the moderation results. - String get model; @override + String get model; /// A list of moderation objects. + @override List get results; + + /// Create a copy of CreateModerationResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -20844,8 +22732,12 @@ mixin _$Moderation { ModerationCategoriesScores get categoryScores => throw _privateConstructorUsedError; + /// Serializes this Moderation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -20876,6 +22768,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -20899,6 +22793,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> ) as $Val); } + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModerationCategoriesCopyWith<$Res> get categories { @@ -20907,6 +22803,8 @@ class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> }); } + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ModerationCategoriesScoresCopyWith<$Res> get categoryScores { @@ -20945,6 +22843,8 @@ class __$$ModerationImplCopyWithImpl<$Res> _$ModerationImpl _value, $Res Function(_$ModerationImpl) _then) : super(_value, _then); + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21011,12 +22911,14 @@ class _$ModerationImpl extends _Moderation { other.categoryScores == categoryScores)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, flagged, categories, categoryScores); - @JsonKey(ignore: true) + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => @@ -21042,21 +22944,23 @@ abstract class _Moderation extends Moderation { factory _Moderation.fromJson(Map json) = _$ModerationImpl.fromJson; - @override - /// Whether any of the below categories are flagged. - bool get flagged; @override + bool get flagged; /// A list of the categories, and whether they are flagged or not. - ModerationCategories get categories; @override + ModerationCategories get categories; /// A list of the categories along with their scores as predicted by model. + @override @JsonKey(name: 'category_scores') ModerationCategoriesScores get categoryScores; + + /// Create a copy of Moderation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21107,8 +23011,12 @@ mixin _$ModerationCategories { @JsonKey(name: 'violence/graphic') bool get violenceGraphic => throw _privateConstructorUsedError; + /// Serializes this ModerationCategories to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCategoriesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21144,6 +23052,8 @@ class _$ModerationCategoriesCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21238,6 +23148,8 @@ class __$$ModerationCategoriesImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesImpl) _then) : super(_value, _then); + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21407,7 +23319,7 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { other.violenceGraphic == violenceGraphic)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -21423,7 +23335,9 @@ class _$ModerationCategoriesImpl extends _ModerationCategories { violence, violenceGraphic); - @JsonKey(ignore: true) + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> @@ -21460,59 +23374,61 @@ abstract class _ModerationCategories extends ModerationCategories { factory _ModerationCategories.fromJson(Map json) = _$ModerationCategoriesImpl.fromJson; - @override - /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - bool get hate; @override + bool get hate; /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @override @JsonKey(name: 'hate/threatening') bool get hateThreatening; - @override /// Content that expresses, incites, or promotes harassing language towards any target. - bool get harassment; @override + bool get harassment; /// Harassment content that also includes violence or serious harm towards any target. + @override @JsonKey(name: 'harassment/threatening') bool get harassmentThreatening; - @override /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + @override @JsonKey(name: 'self-harm') bool get selfHarm; - @override /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + @override @JsonKey(name: 'self-harm/intent') bool get selfHarmIntent; - @override /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + @override @JsonKey(name: 'self-harm/instructions') bool get selfHarmInstructions; - @override /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - bool get sexual; @override + bool get sexual; /// Sexual content that includes an individual who is under 18 years old. + @override @JsonKey(name: 'sexual/minors') bool get sexualMinors; - @override /// Content that depicts death, violence, or physical injury. - bool get violence; @override + bool get violence; /// Content that depicts death, violence, or physical injury in graphic detail. + @override @JsonKey(name: 'violence/graphic') bool get violenceGraphic; + + /// Create a copy of ModerationCategories + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -21564,8 +23480,12 @@ mixin _$ModerationCategoriesScores { @JsonKey(name: 'violence/graphic') double get violenceGraphic => throw _privateConstructorUsedError; + /// Serializes this ModerationCategoriesScores to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $ModerationCategoriesScoresCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -21602,6 +23522,8 @@ class _$ModerationCategoriesScoresCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21699,6 +23621,8 @@ class __$$ModerationCategoriesScoresImplCopyWithImpl<$Res> $Res Function(_$ModerationCategoriesScoresImpl) _then) : super(_value, _then); + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -21869,7 +23793,7 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { other.violenceGraphic == violenceGraphic)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -21885,7 +23809,9 @@ class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { violence, violenceGraphic); - @JsonKey(ignore: true) + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> @@ -21922,59 +23848,61 @@ abstract class _ModerationCategoriesScores extends ModerationCategoriesScores { factory _ModerationCategoriesScores.fromJson(Map json) = _$ModerationCategoriesScoresImpl.fromJson; - @override - /// The score for the category 'hate'. - double get hate; @override + double get hate; /// The score for the category 'hate/threatening'. + @override @JsonKey(name: 'hate/threatening') double get hateThreatening; - @override /// The score for the category 'harassment'. - double get harassment; @override + double get harassment; /// The score for the category 'harassment/threatening'. + @override @JsonKey(name: 'harassment/threatening') double get harassmentThreatening; - @override /// The score for the category 'self-harm'. + @override @JsonKey(name: 'self-harm') double get selfHarm; - @override /// The score for the category 'self-harm/intent'. + @override @JsonKey(name: 'self-harm/intent') double get selfHarmIntent; - @override /// The score for the category 'self-harm/instructions'. + @override @JsonKey(name: 'self-harm/instructions') double get selfHarmInstructions; - @override /// The score for the category 'sexual'. - double get sexual; @override + double get sexual; /// The score for the category 'sexual/minors'. + @override @JsonKey(name: 'sexual/minors') double get sexualMinors; - @override /// The score for the category 'violence'. - double get violence; @override + double get violence; /// The score for the category 'violence/graphic'. + @override @JsonKey(name: 'violence/graphic') double get violenceGraphic; + + /// Create a copy of ModerationCategoriesScores + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22007,38 +23935,60 @@ mixin _$AssistantObject { /// The system instructions that the assistant uses. 
The maximum length is 256,000 characters. String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this AssistantObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -22081,6 +24031,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22154,6 +24106,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> ) as $Val); } + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -22166,6 +24120,8 @@ class _$AssistantObjectCopyWithImpl<$Res, $Val extends AssistantObject> }); } + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantObjectResponseFormatCopyWith<$Res>? get responseFormat { @@ -22220,6 +24176,8 @@ class __$$AssistantObjectImplCopyWithImpl<$Res> _$AssistantObjectImpl _value, $Res Function(_$AssistantObjectImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22349,10 +24307,12 @@ class _$AssistantObjectImpl extends _AssistantObject { @override final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override List get tools { if (_tools is EqualUnmodifiableListView) return _tools; @@ -22365,10 +24325,14 @@ class _$AssistantObjectImpl extends _AssistantObject { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -22378,23 +24342,38 @@ class _$AssistantObjectImpl extends _AssistantObject { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -22431,7 +24410,7 @@ class _$AssistantObjectImpl extends _AssistantObject { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -22449,7 +24428,9 @@ class _$AssistantObjectImpl extends _AssistantObject { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => @@ -22488,72 +24469,92 @@ abstract class _AssistantObject extends AssistantObject { factory _AssistantObject.fromJson(Map json) = _$AssistantObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `assistant`. - AssistantObjectObject get object; @override + AssistantObjectObject get object; /// The Unix timestamp (in seconds) for when the assistant was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The name of the assistant. The maximum length is 256 characters. - String? get name; @override + String? get name; /// The description of the assistant. The maximum length is 512 characters. - String? get description; @override + String? get description; /// ID of the model to use. 
You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. - String get model; @override + String get model; /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - String? get instructions; @override + String? get instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. 
+ /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_AssistantObjectResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) AssistantObjectResponseFormat? get responseFormat; + + /// Create a copy of AssistantObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectImplCopyWith<_$AssistantObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -22561,11 +24562,10 @@ abstract class _AssistantObject extends AssistantObject { AssistantObjectResponseFormat _$AssistantObjectResponseFormatFromJson( Map json) { switch (json['runtimeType']) { - case 'enumeration': + case 'mode': return AssistantObjectResponseFormatEnumeration.fromJson(json); - case 'assistantsResponseFormat': - return AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( - json); + case 'responseFormat': + return AssistantObjectResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -22581,52 +24581,48 @@ mixin _$AssistantObjectResponseFormat { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? 
enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -22649,6 +24645,9 @@ class _$AssistantObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -22671,6 +24670,8 @@ class __$$AssistantObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -22691,7 +24692,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl extends AssistantObjectResponseFormatEnumeration { const _$AssistantObjectResponseFormatEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'enumeration', + : $type = $type ?? 'mode', super._(); factory _$AssistantObjectResponseFormatEnumerationImpl.fromJson( @@ -22706,7 +24707,7 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override String toString() { - return 'AssistantObjectResponseFormat.enumeration(value: $value)'; + return 'AssistantObjectResponseFormat.mode(value: $value)'; } @override @@ -22717,11 +24718,13 @@ class _$AssistantObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantObjectResponseFormatEnumerationImplCopyWith< @@ -22733,31 +24736,30 @@ class _$AssistantObjectResponseFormatEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) { - return enumeration(value); + return mode(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return enumeration?.call(value); + return mode?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(value); + if (mode != null) { + return mode(value); } return orElse(); } @@ -22766,38 +24768,33 @@ class _$AssistantObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) { - return enumeration(this); + return mode(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return enumeration?.call(this); + return mode?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(this); + if (mode != null) { + return mode(this); } return orElse(); } @@ -22823,59 +24820,61 @@ abstract class AssistantObjectResponseFormatEnumeration @override AssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantObjectResponseFormatEnumerationImplCopyWith< _$AssistantObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl value, - $Res Function( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl) + factory _$$AssistantObjectResponseFormatResponseFormatImplCopyWith( + _$AssistantObjectResponseFormatResponseFormatImpl value, + $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) then) = - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res>; + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res> +class __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$AssistantObjectResponseFormatCopyWithImpl<$Res, - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$AssistantObjectResponseFormatResponseFormatImpl> implements - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl _value, - $Res Function(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl) - _then) + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith<$Res> { + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl( + _$AssistantObjectResponseFormatResponseFormatImpl _value, + $Res Function(_$AssistantObjectResponseFormatResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$AssistantObjectResponseFormatAssistantsResponseFormatImpl( + return _then(_$AssistantObjectResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -22883,80 +24882,79 @@ class __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< /// @nodoc @JsonSerializable() -class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl - extends AssistantObjectResponseFormatAssistantsResponseFormat { - const _$AssistantObjectResponseFormatAssistantsResponseFormatImpl(this.value, +class _$AssistantObjectResponseFormatResponseFormatImpl + extends AssistantObjectResponseFormatResponseFormat { + const _$AssistantObjectResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'assistantsResponseFormat', + : $type = $type ?? 'responseFormat', super._(); - factory _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$AssistantObjectResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$AssistantObjectResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'AssistantObjectResponseFormat.assistantsResponseFormat(value: $value)'; + return 'AssistantObjectResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$AssistantObjectResponseFormatAssistantsResponseFormatImpl && + other is _$AssistantObjectResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatResponseFormatImpl> get copyWith => - __$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl>( + __$$AssistantObjectResponseFormatResponseFormatImplCopyWithImpl< + _$AssistantObjectResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(AssistantResponseFormatMode value) enumeration, - required TResult Function(AssistantsResponseFormat value) - assistantsResponseFormat, + required TResult Function(AssistantResponseFormatMode value) mode, + required TResult Function(ResponseFormat value) responseFormat, }) { - return assistantsResponseFormat(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(AssistantResponseFormatMode value)? enumeration, - TResult? Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult? Function(AssistantResponseFormatMode value)? mode, + TResult? 
Function(ResponseFormat value)? responseFormat, }) { - return assistantsResponseFormat?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(AssistantResponseFormatMode value)? enumeration, - TResult Function(AssistantsResponseFormat value)? assistantsResponseFormat, + TResult Function(AssistantResponseFormatMode value)? mode, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (assistantsResponseFormat != null) { - return assistantsResponseFormat(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -22965,66 +24963,64 @@ class _$AssistantObjectResponseFormatAssistantsResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(AssistantObjectResponseFormatEnumeration value) - enumeration, - required TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value) - assistantsResponseFormat, + mode, + required TResult Function(AssistantObjectResponseFormatResponseFormat value) + responseFormat, }) { - return assistantsResponseFormat(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult? Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult? Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult? Function(AssistantObjectResponseFormatResponseFormat value)? + responseFormat, }) { - return assistantsResponseFormat?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantObjectResponseFormatEnumeration value)? - enumeration, - TResult Function( - AssistantObjectResponseFormatAssistantsResponseFormat value)? - assistantsResponseFormat, + TResult Function(AssistantObjectResponseFormatEnumeration value)? mode, + TResult Function(AssistantObjectResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { - if (assistantsResponseFormat != null) { - return assistantsResponseFormat(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( + return _$$AssistantObjectResponseFormatResponseFormatImplToJson( this, ); } } -abstract class AssistantObjectResponseFormatAssistantsResponseFormat +abstract class AssistantObjectResponseFormatResponseFormat extends AssistantObjectResponseFormat { - const factory AssistantObjectResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl; - const AssistantObjectResponseFormatAssistantsResponseFormat._() : super._(); + const factory AssistantObjectResponseFormatResponseFormat( + final ResponseFormat value) = + _$AssistantObjectResponseFormatResponseFormatImpl; + const AssistantObjectResponseFormatResponseFormat._() : super._(); - factory AssistantObjectResponseFormatAssistantsResponseFormat.fromJson( + factory AssistantObjectResponseFormatResponseFormat.fromJson( Map json) = - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl.fromJson; + _$AssistantObjectResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; - @JsonKey(ignore: true) - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl> + ResponseFormat get value; + + /// Create a copy of AssistantObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AssistantObjectResponseFormatResponseFormatImplCopyWith< + _$AssistantObjectResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23051,39 +25047,61 @@ mixin _$CreateAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this CreateAssistantRequest to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -23125,6 +25143,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23183,6 +25203,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantModelCopyWith<$Res> get model { @@ -23191,6 +25213,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -23203,6 +25227,8 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -23259,6 +25285,8 @@ class __$$CreateAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$CreateAssistantRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23361,10 +25389,12 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -23378,10 +25408,14 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -23392,23 +25426,38 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. 
Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -23441,7 +25490,7 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -23456,7 +25505,9 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> @@ -23492,64 +25543,84 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { factory _CreateAssistantRequest.fromJson(Map json) = _$CreateAssistantRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @_AssistantModelConverter() AssistantModel get model; - @override /// The name of the assistant. The maximum length is 256 characters. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The description of the assistant. The maximum length is 512 characters. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
+ @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_CreateAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateAssistantRequestResponseFormat? 
get responseFormat; + + /// Create a copy of CreateAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23608,6 +25679,8 @@ mixin _$AssistantModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -23627,6 +25700,9 @@ class _$AssistantModelCopyWithImpl<$Res, $Val extends AssistantModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -23648,6 +25724,8 @@ class __$$AssistantModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$AssistantModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23691,11 +25769,13 @@ class _$AssistantModelEnumerationImpl extends AssistantModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> @@ -23782,7 +25862,10 @@ abstract class AssistantModelEnumeration extends AssistantModel { @override AssistantModels get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantModelEnumerationImplCopyWith<_$AssistantModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23804,6 +25887,8 @@ class __$$AssistantModelStringImplCopyWithImpl<$Res> $Res Function(_$AssistantModelStringImpl) _then) : super(_value, _then); + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -23847,11 +25932,13 @@ class _$AssistantModelStringImpl extends AssistantModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> @@ -23939,7 +26026,10 @@ abstract class AssistantModelString extends AssistantModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of AssistantModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantModelStringImplCopyWith<_$AssistantModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -23949,9 +26039,8 @@ CreateAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateAssistantRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return CreateAssistantRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -23968,19 +26057,19 @@ mixin _$CreateAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -23990,29 +26079,29 @@ mixin _$CreateAssistantRequestResponseFormat { CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -24035,6 +26124,9 @@ class _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -24061,6 +26153,8 @@ class __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -24107,11 +26201,13 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -24125,7 +26221,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -24134,7 +26230,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -24143,7 +26239,7 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -24159,8 +26255,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -24170,9 +26266,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -24182,9 +26277,8 @@ class _$CreateAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -24214,61 +26308,66 @@ abstract class CreateAssistantRequestResponseFormatEnumeration @override CreateAssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< _$CreateAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + factory _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith( + _$CreateAssistantRequestResponseFormatResponseFormatImpl value, $Res Function( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _$CreateAssistantRequestResponseFormatResponseFormatImpl) then) = - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateAssistantRequestResponseFormatResponseFormatImpl> implements - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateAssistantRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateAssistantRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateAssistantRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -24276,80 +26375,79 @@ class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// @nodoc @JsonSerializable() -class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - extends CreateAssistantRequestResponseFormatAssistantsResponseFormat { - const _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$CreateAssistantRequestResponseFormatResponseFormatImpl + extends CreateAssistantRequestResponseFormatResponseFormat { + const _$CreateAssistantRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateAssistantRequestResponseFormat.format(value: $value)'; + return 'CreateAssistantRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl && + other is _$CreateAssistantRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateAssistantRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? 
responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -24361,10 +26459,10 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl CreateAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + CreateAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -24372,11 +26470,10 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult? mapOrNull({ TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -24384,43 +26481,43 @@ class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult maybeMap({ TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat +abstract class CreateAssistantRequestResponseFormatResponseFormat extends CreateAssistantRequestResponseFormat { - const factory CreateAssistantRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl; - const CreateAssistantRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory CreateAssistantRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateAssistantRequestResponseFormatResponseFormatImpl; + const CreateAssistantRequestResponseFormatResponseFormat._() : super._(); - factory CreateAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateAssistantRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$CreateAssistantRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; - @JsonKey(ignore: true) - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + ResponseFormat get value; + + /// Create a copy of CreateAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24447,7 +26544,8 @@ mixin _$ModifyAssistantRequest { @JsonKey(includeIfNull: false) String? get instructions => throw _privateConstructorUsedError; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. List get tools => throw _privateConstructorUsedError; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. @@ -24458,32 +26556,53 @@ mixin _$ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) double? get topP => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. 
Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat => throw _privateConstructorUsedError; + /// Serializes this ModifyAssistantRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyAssistantRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -24525,6 +26644,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24588,6 +26709,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -24600,6 +26723,8 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, }); } + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ModifyAssistantRequestResponseFormatCopyWith<$Res>? 
get responseFormat { @@ -24655,6 +26780,8 @@ class __$$ModifyAssistantRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyAssistantRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -24764,10 +26891,12 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(includeIfNull: false) final String? instructions; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. final List _tools; - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override @JsonKey() List get tools { @@ -24793,10 +26922,14 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -24807,23 +26940,38 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @JsonKey(name: 'top_p', includeIfNull: false) final double? topP; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -24857,7 +27005,7 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -24873,7 +27021,9 @@ class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { topP, responseFormat); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> @@ -24910,69 +27060,89 @@ abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { factory _ModifyAssistantRequest.fromJson(Map json) = _$ModifyAssistantRequestImpl.fromJson; - @override - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override @JsonKey(includeIfNull: false) String? get model; - @override /// The name of the assistant. The maximum length is 256 characters. + @override @JsonKey(includeIfNull: false) String? get name; - @override /// The description of the assistant. The maximum length is 512 characters. + @override @JsonKey(includeIfNull: false) String? get description; - @override /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - List get tools; + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + /// types `code_interpreter`, `file_search`, or `function`. @override + List get tools; /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. + @override @JsonKey(name: 'file_ids') List get fileIds; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? 
get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_ModifyAssistantRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) ModifyAssistantRequestResponseFormat? get responseFormat; + + /// Create a copy of ModifyAssistantRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24982,9 +27152,8 @@ ModifyAssistantRequestResponseFormat switch (json['runtimeType']) { case 'mode': return ModifyAssistantRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return ModifyAssistantRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return ModifyAssistantRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -25001,19 +27170,19 @@ mixin _$ModifyAssistantRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -25023,29 +27192,29 @@ mixin _$ModifyAssistantRequestResponseFormat { ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ModifyAssistantRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -25068,6 +27237,9 @@ class _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -25094,6 +27266,8 @@ class __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -25140,11 +27314,13 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< @@ -25158,7 +27334,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -25167,7 +27343,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -25176,7 +27352,7 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -25192,8 +27368,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -25203,9 +27379,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -25215,9 +27390,8 @@ class _$ModifyAssistantRequestResponseFormatEnumerationImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -25247,61 +27421,66 @@ abstract class ModifyAssistantRequestResponseFormatEnumeration @override ModifyAssistantResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< _$ModifyAssistantRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + factory _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl value, $Res Function( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _$ModifyAssistantRequestResponseFormatResponseFormatImpl) then) = - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> implements - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$ModifyAssistantRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$ModifyAssistantRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -25309,80 +27488,79 @@ class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWi /// @nodoc @JsonSerializable() -class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - extends ModifyAssistantRequestResponseFormatAssistantsResponseFormat { - const _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$ModifyAssistantRequestResponseFormatResponseFormatImpl + extends ModifyAssistantRequestResponseFormatResponseFormat { + const _$ModifyAssistantRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'ModifyAssistantRequestResponseFormat.format(value: $value)'; + return 'ModifyAssistantRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl && + other is _$ModifyAssistantRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> get copyWith => - __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + __$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWithImpl< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(ModifyAssistantResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(ModifyAssistantResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? 
responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(ModifyAssistantResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -25394,10 +27572,10 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl ModifyAssistantRequestResponseFormatEnumeration value) mode, required TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) - format, + ModifyAssistantRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -25405,11 +27583,10 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult? mapOrNull({ TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult? Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -25417,43 +27594,43 @@ class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl TResult maybeMap({ TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? mode, - TResult Function( - ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(ModifyAssistantRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat +abstract class ModifyAssistantRequestResponseFormatResponseFormat extends ModifyAssistantRequestResponseFormat { - const factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl; - const ModifyAssistantRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory ModifyAssistantRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$ModifyAssistantRequestResponseFormatResponseFormatImpl; + const ModifyAssistantRequestResponseFormatResponseFormat._() : super._(); - factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + factory ModifyAssistantRequestResponseFormatResponseFormat.fromJson( Map json) = - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$ModifyAssistantRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; - @JsonKey(ignore: true) - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + ResponseFormat get value; + + /// Create a copy of ModifyAssistantRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + _$$ModifyAssistantRequestResponseFormatResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25474,8 +27651,12 @@ mixin _$DeleteAssistantResponse { DeleteAssistantResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteAssistantResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteAssistantResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25500,6 +27681,8 @@ class _$DeleteAssistantResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25546,6 +27729,8 @@ class __$$DeleteAssistantResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteAssistantResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25607,11 +27792,13 @@ class _$DeleteAssistantResponseImpl extends _DeleteAssistantResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> @@ -25637,20 +27824,22 @@ abstract class _DeleteAssistantResponse extends DeleteAssistantResponse { factory _DeleteAssistantResponse.fromJson(Map json) = _$DeleteAssistantResponseImpl.fromJson; - @override - /// The assistant identifier. - String get id; @override + String get id; /// Whether the assistant was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `assistant.deleted`. + @override DeleteAssistantResponseObject get object; + + /// Create a copy of DeleteAssistantResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteAssistantResponseImplCopyWith<_$DeleteAssistantResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -25680,8 +27869,12 @@ mixin _$ListAssistantsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListAssistantsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $ListAssistantsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25711,6 +27904,8 @@ class _$ListAssistantsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25772,6 +27967,8 @@ class __$$ListAssistantsResponseImplCopyWithImpl<$Res> $Res Function(_$ListAssistantsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25868,12 +28065,14 @@ class _$ListAssistantsResponseImpl extends _ListAssistantsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> @@ -25901,35 +28100,250 @@ abstract class _ListAssistantsResponse extends ListAssistantsResponse { factory _ListAssistantsResponse.fromJson(Map json) = _$ListAssistantsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of assistants. - List get data; @override + List get data; /// The ID of the first assistant in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last assistant in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more assistants to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListAssistantsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListAssistantsResponseImplCopyWith<_$ListAssistantsResponseImpl> get copyWith => throw _privateConstructorUsedError; } +FileSearchRankingOptions _$FileSearchRankingOptionsFromJson( + Map json) { + return _FileSearchRankingOptions.fromJson(json); +} + +/// @nodoc +mixin _$FileSearchRankingOptions { + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? get ranker => throw _privateConstructorUsedError; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold') + double get scoreThreshold => throw _privateConstructorUsedError; + + /// Serializes this FileSearchRankingOptions to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $FileSearchRankingOptionsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FileSearchRankingOptionsCopyWith<$Res> { + factory $FileSearchRankingOptionsCopyWith(FileSearchRankingOptions value, + $Res Function(FileSearchRankingOptions) then) = + _$FileSearchRankingOptionsCopyWithImpl<$Res, FileSearchRankingOptions>; + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); +} + +/// @nodoc +class _$FileSearchRankingOptionsCopyWithImpl<$Res, + $Val extends FileSearchRankingOptions> + implements $FileSearchRankingOptionsCopyWith<$Res> { + _$FileSearchRankingOptionsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = freezed, + Object? scoreThreshold = null, + }) { + return _then(_value.copyWith( + ranker: freezed == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker?, + scoreThreshold: null == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$FileSearchRankingOptionsImplCopyWith<$Res> + implements $FileSearchRankingOptionsCopyWith<$Res> { + factory _$$FileSearchRankingOptionsImplCopyWith( + _$FileSearchRankingOptionsImpl value, + $Res Function(_$FileSearchRankingOptionsImpl) then) = + __$$FileSearchRankingOptionsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); +} + +/// @nodoc +class __$$FileSearchRankingOptionsImplCopyWithImpl<$Res> + extends _$FileSearchRankingOptionsCopyWithImpl<$Res, + _$FileSearchRankingOptionsImpl> + implements _$$FileSearchRankingOptionsImplCopyWith<$Res> { + __$$FileSearchRankingOptionsImplCopyWithImpl( + _$FileSearchRankingOptionsImpl _value, + $Res Function(_$FileSearchRankingOptionsImpl) _then) + : super(_value, _then); + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? ranker = freezed, + Object? scoreThreshold = null, + }) { + return _then(_$FileSearchRankingOptionsImpl( + ranker: freezed == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker?, + scoreThreshold: null == scoreThreshold + ? _value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FileSearchRankingOptionsImpl extends _FileSearchRankingOptions { + const _$FileSearchRankingOptionsImpl( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.ranker, + @JsonKey(name: 'score_threshold') required this.scoreThreshold}) + : super._(); + + factory _$FileSearchRankingOptionsImpl.fromJson(Map json) => + _$$FileSearchRankingOptionsImplFromJson(json); + + /// The ranker to use for the file search. 
If not specified will use the `auto` ranker. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final FileSearchRanker? ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold') + final double scoreThreshold; + + @override + String toString() { + return 'FileSearchRankingOptions(ranker: $ranker, scoreThreshold: $scoreThreshold)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FileSearchRankingOptionsImpl && + (identical(other.ranker, ranker) || other.ranker == ranker) && + (identical(other.scoreThreshold, scoreThreshold) || + other.scoreThreshold == scoreThreshold)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> + get copyWith => __$$FileSearchRankingOptionsImplCopyWithImpl< + _$FileSearchRankingOptionsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$FileSearchRankingOptionsImplToJson( + this, + ); + } +} + +abstract class _FileSearchRankingOptions extends FileSearchRankingOptions { + const factory _FileSearchRankingOptions( + {@JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final FileSearchRanker? ranker, + @JsonKey(name: 'score_threshold') + required final double scoreThreshold}) = _$FileSearchRankingOptionsImpl; + const _FileSearchRankingOptions._() : super._(); + + factory _FileSearchRankingOptions.fromJson(Map json) = + _$FileSearchRankingOptionsImpl.fromJson; + + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + FileSearchRanker? get ranker; + + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @override + @JsonKey(name: 'score_threshold') + double get scoreThreshold; + + /// Create a copy of FileSearchRankingOptions + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$FileSearchRankingOptionsImplCopyWith<_$FileSearchRankingOptionsImpl> + get copyWith => throw _privateConstructorUsedError; +} + AssistantsNamedToolChoice _$AssistantsNamedToolChoiceFromJson( Map json) { return _AssistantsNamedToolChoice.fromJson(json); @@ -25945,8 +28359,12 @@ mixin _$AssistantsNamedToolChoice { AssistantsFunctionCallOption? get function => throw _privateConstructorUsedError; + /// Serializes this AssistantsNamedToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsNamedToolChoiceCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -25975,6 +28393,8 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -25993,6 +28413,8 @@ class _$AssistantsNamedToolChoiceCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsFunctionCallOptionCopyWith<$Res>? get function { @@ -26034,6 +28456,8 @@ class __$$AssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$AssistantsNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26087,11 +28511,13 @@ class _$AssistantsNamedToolChoiceImpl extends _AssistantsNamedToolChoice { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> @@ -26117,17 +28543,19 @@ abstract class _AssistantsNamedToolChoice extends AssistantsNamedToolChoice { factory _AssistantsNamedToolChoice.fromJson(Map json) = _$AssistantsNamedToolChoiceImpl.fromJson; - @override - /// The type of the tool. If type is `function`, the function name must be set - AssistantsToolType get type; @override + AssistantsToolType get type; /// No Description + @override @JsonKey(includeIfNull: false) AssistantsFunctionCallOption? get function; + + /// Create a copy of AssistantsNamedToolChoice + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsNamedToolChoiceImplCopyWith<_$AssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26142,8 +28570,12 @@ mixin _$AssistantsFunctionCallOption { /// The name of the function to call. String get name => throw _privateConstructorUsedError; + /// Serializes this AssistantsFunctionCallOption to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantsFunctionCallOptionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26170,6 +28602,8 @@ class _$AssistantsFunctionCallOptionCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -26206,6 +28640,8 @@ class __$$AssistantsFunctionCallOptionImplCopyWithImpl<$Res> $Res Function(_$AssistantsFunctionCallOptionImpl) _then) : super(_value, _then); + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26246,11 +28682,13 @@ class _$AssistantsFunctionCallOptionImpl extends _AssistantsFunctionCallOption { (identical(other.name, name) || other.name == name)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name); - @JsonKey(ignore: true) + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$AssistantsFunctionCallOptionImplCopyWith< @@ -26275,169 +28713,19 @@ abstract class _AssistantsFunctionCallOption factory _AssistantsFunctionCallOption.fromJson(Map json) = _$AssistantsFunctionCallOptionImpl.fromJson; - @override - /// The name of the function to call. + @override String get name; + + /// Create a copy of AssistantsFunctionCallOption + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$AssistantsFunctionCallOptionImplCopyWith< _$AssistantsFunctionCallOptionImpl> get copyWith => throw _privateConstructorUsedError; } -AssistantsResponseFormat _$AssistantsResponseFormatFromJson( - Map json) { - return _AssistantsResponseFormat.fromJson(json); -} - -/// @nodoc -mixin _$AssistantsResponseFormat { - /// Must be one of `text` or `json_object`. - AssistantsResponseFormatType get type => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $AssistantsResponseFormatCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $AssistantsResponseFormatCopyWith<$Res> { - factory $AssistantsResponseFormatCopyWith(AssistantsResponseFormat value, - $Res Function(AssistantsResponseFormat) then) = - _$AssistantsResponseFormatCopyWithImpl<$Res, AssistantsResponseFormat>; - @useResult - $Res call({AssistantsResponseFormatType type}); -} - -/// @nodoc -class _$AssistantsResponseFormatCopyWithImpl<$Res, - $Val extends AssistantsResponseFormat> - implements $AssistantsResponseFormatCopyWith<$Res> { - _$AssistantsResponseFormatCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormatType, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$AssistantsResponseFormatImplCopyWith<$Res> - implements $AssistantsResponseFormatCopyWith<$Res> { - factory _$$AssistantsResponseFormatImplCopyWith( - _$AssistantsResponseFormatImpl value, - $Res Function(_$AssistantsResponseFormatImpl) then) = - __$$AssistantsResponseFormatImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({AssistantsResponseFormatType type}); -} - -/// @nodoc -class __$$AssistantsResponseFormatImplCopyWithImpl<$Res> - extends _$AssistantsResponseFormatCopyWithImpl<$Res, - _$AssistantsResponseFormatImpl> - implements _$$AssistantsResponseFormatImplCopyWith<$Res> { - __$$AssistantsResponseFormatImplCopyWithImpl( - _$AssistantsResponseFormatImpl _value, - $Res Function(_$AssistantsResponseFormatImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? type = null, - }) { - return _then(_$AssistantsResponseFormatImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormatType, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$AssistantsResponseFormatImpl extends _AssistantsResponseFormat { - const _$AssistantsResponseFormatImpl( - {this.type = AssistantsResponseFormatType.text}) - : super._(); - - factory _$AssistantsResponseFormatImpl.fromJson(Map json) => - _$$AssistantsResponseFormatImplFromJson(json); - - /// Must be one of `text` or `json_object`. - @override - @JsonKey() - final AssistantsResponseFormatType type; - - @override - String toString() { - return 'AssistantsResponseFormat(type: $type)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$AssistantsResponseFormatImpl && - (identical(other.type, type) || other.type == type)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, type); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> - get copyWith => __$$AssistantsResponseFormatImplCopyWithImpl< - _$AssistantsResponseFormatImpl>(this, _$identity); - - @override - Map toJson() { - return _$$AssistantsResponseFormatImplToJson( - this, - ); - } -} - -abstract class _AssistantsResponseFormat extends AssistantsResponseFormat { - const factory _AssistantsResponseFormat( - {final AssistantsResponseFormatType type}) = - _$AssistantsResponseFormatImpl; - const _AssistantsResponseFormat._() : super._(); - - factory _AssistantsResponseFormat.fromJson(Map json) = - _$AssistantsResponseFormatImpl.fromJson; - - @override - - /// Must be one of `text` or `json_object`. - AssistantsResponseFormatType get type; - @override - @JsonKey(ignore: true) - _$$AssistantsResponseFormatImplCopyWith<_$AssistantsResponseFormatImpl> - get copyWith => throw _privateConstructorUsedError; -} - TruncationObject _$TruncationObjectFromJson(Map json) { return _TruncationObject.fromJson(json); } @@ -26451,8 +28739,12 @@ mixin _$TruncationObject { @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages => throw _privateConstructorUsedError; + /// Serializes this TruncationObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $TruncationObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26478,6 +28770,8 @@ class _$TruncationObjectCopyWithImpl<$Res, $Val extends TruncationObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26518,6 +28812,8 @@ class __$$TruncationObjectImplCopyWithImpl<$Res> $Res Function(_$TruncationObjectImpl) _then) : super(_value, _then); + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -26572,11 +28868,13 @@ class _$TruncationObjectImpl extends _TruncationObject { other.lastMessages == lastMessages)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, lastMessages); - @JsonKey(ignore: true) + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => @@ -26601,17 +28899,19 @@ abstract class _TruncationObject extends TruncationObject { factory _TruncationObject.fromJson(Map json) = _$TruncationObjectImpl.fromJson; - @override - /// The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - TruncationObjectType get type; @override + TruncationObjectType get type; /// The number of most recent messages from the thread when constructing the context for the run. + @override @JsonKey(name: 'last_messages', includeIfNull: false) int? get lastMessages; + + /// Create a copy of TruncationObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$TruncationObjectImplCopyWith<_$TruncationObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -26685,7 +28985,9 @@ mixin _$RunObject { /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. List get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -26721,18 +29023,39 @@ mixin _$RunObject { @JsonKey(name: 'tool_choice') RunObjectToolChoice? 
get toolChoice => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls') + bool? get parallelToolCalls => throw _privateConstructorUsedError; + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat => throw _privateConstructorUsedError; + /// Serializes this RunObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -26772,6 +29095,7 @@ abstract class $RunObjectCopyWith<$Res> { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -26795,6 +29119,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -26823,6 +29149,7 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_value.copyWith( @@ -26926,6 +29253,10 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -26933,6 +29264,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ) as $Val); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunRequiredActionCopyWith<$Res>? get requiredAction { @@ -26945,6 +29278,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunLastErrorCopyWith<$Res>? get lastError { @@ -26957,6 +29292,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectIncompleteDetailsCopyWith<$Res>? get incompleteDetails { @@ -26970,6 +29307,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunCompletionUsageCopyWith<$Res>? get usage { @@ -26982,6 +29321,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -26994,6 +29335,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectToolChoiceCopyWith<$Res>? get toolChoice { @@ -27006,6 +29349,8 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> }); } + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectResponseFormatCopyWith<$Res> get responseFormat { @@ -27054,6 +29399,7 @@ abstract class _$$RunObjectImplCopyWith<$Res> @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat responseFormat}); @@ -27082,6 +29428,8 @@ class __$$RunObjectImplCopyWithImpl<$Res> _$RunObjectImpl _value, $Res Function(_$RunObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27110,6 +29458,7 @@ class __$$RunObjectImplCopyWithImpl<$Res> Object? 
maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = null, }) { return _then(_$RunObjectImpl( @@ -27213,6 +29562,10 @@ class __$$RunObjectImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as RunObjectToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: null == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -27252,6 +29605,7 @@ class _$RunObjectImpl extends _RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required this.toolChoice, + @JsonKey(name: 'parallel_tool_calls') required this.parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required this.responseFormat}) @@ -27348,10 +29702,14 @@ class _$RunObjectImpl extends _RunObject { return EqualUnmodifiableListView(_tools); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -27400,11 +29758,29 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(name: 'tool_choice') final RunObjectToolChoice? toolChoice; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls') + final bool? parallelToolCalls; + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') @@ -27412,7 +29788,7 @@ class _$RunObjectImpl extends _RunObject { @override String toString() { - return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat)'; + return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat)'; } @override @@ -27462,11 +29838,13 @@ class _$RunObjectImpl extends _RunObject { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -27495,10 +29873,13 @@ class _$RunObjectImpl extends _RunObject { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat ]); - @JsonKey(ignore: true) + /// Create a copy of RunObject + /// with the given fields replaced by the non-null 
parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => @@ -27545,6 +29926,8 @@ abstract class _RunObject extends RunObject { @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') required final RunObjectToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls') + required final bool? parallelToolCalls, @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') required final RunObjectResponseFormat responseFormat}) = _$RunObjectImpl; @@ -27553,140 +29936,162 @@ abstract class _RunObject extends RunObject { factory _RunObject.fromJson(Map json) = _$RunObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run`. - RunObjectObject get object; @override + RunObjectObject get object; /// The Unix timestamp (in seconds) for when the run was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. - RunStatus get status; @override + RunStatus get status; /// Details on the action required to continue the run. Will be `null` if no action is required. + @override @JsonKey(name: 'required_action') RunRequiredAction? get requiredAction; - @override /// The last error associated with this run. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') RunLastError? get lastError; - @override /// The Unix timestamp (in seconds) for when the run will expire. + @override @JsonKey(name: 'expires_at') int? get expiresAt; - @override /// The Unix timestamp (in seconds) for when the run was started. + @override @JsonKey(name: 'started_at') int? get startedAt; - @override /// The Unix timestamp (in seconds) for when the run was cancelled. + @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; - @override /// The Unix timestamp (in seconds) for when the run failed. + @override @JsonKey(name: 'failed_at') int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the run was completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override /// Details on why the run is incomplete. Will be `null` if the run is not incomplete. + @override @JsonKey(name: 'incomplete_details') RunObjectIncompleteDetails? get incompleteDetails; - @override /// The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - String get model; @override + String get model; /// The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. - String get instructions; @override + String get instructions; /// The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. 
- List get tools; @override + List get tools; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). - RunCompletionUsage? get usage; @override + RunCompletionUsage? get usage; /// The sampling temperature used for this run. If not set, defaults to 1. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override /// The nucleus sampling value used for this run. If not set, defaults to 1. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens specified to have been used over the course of the run. + @override @JsonKey(name: 'max_prompt_tokens') int? get maxPromptTokens; - @override /// The maximum number of completion tokens specified to have been used over the course of the run. + @override @JsonKey(name: 'max_completion_tokens') int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy') TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_RunObjectToolChoiceConverter() @JsonKey(name: 'tool_choice') RunObjectToolChoice? get toolChoice; + + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. @override + @JsonKey(name: 'parallel_tool_calls') + bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override @_RunObjectResponseFormatConverter() @JsonKey(name: 'response_format') RunObjectResponseFormat get responseFormat; + + /// Create a copy of RunObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectImplCopyWith<_$RunObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27705,8 +30110,12 @@ mixin _$RunRequiredAction { RunSubmitToolOutputs get submitToolOutputs => throw _privateConstructorUsedError; + /// Serializes this RunRequiredAction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunRequiredActionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -27735,6 +30144,8 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27753,6 +30164,8 @@ class _$RunRequiredActionCopyWithImpl<$Res, $Val extends RunRequiredAction> ) as $Val); } + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunSubmitToolOutputsCopyWith<$Res> get submitToolOutputs { @@ -27788,6 +30201,8 @@ class __$$RunRequiredActionImplCopyWithImpl<$Res> $Res Function(_$RunRequiredActionImpl) _then) : super(_value, _then); + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -27842,11 +30257,13 @@ class _$RunRequiredActionImpl extends _RunRequiredAction { other.submitToolOutputs == submitToolOutputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, submitToolOutputs); - @JsonKey(ignore: true) + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => @@ -27872,17 +30289,19 @@ abstract class _RunRequiredAction extends RunRequiredAction { factory _RunRequiredAction.fromJson(Map json) = _$RunRequiredActionImpl.fromJson; - @override - /// For now, this is always `submit_tool_outputs`. - RunRequiredActionType get type; @override + RunRequiredActionType get type; /// Details on the tool outputs needed for this run to continue. + @override @JsonKey(name: 'submit_tool_outputs') RunSubmitToolOutputs get submitToolOutputs; + + /// Create a copy of RunRequiredAction + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunRequiredActionImplCopyWith<_$RunRequiredActionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -27899,8 +30318,12 @@ mixin _$RunLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this RunLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -27924,6 +30347,8 @@ class _$RunLastErrorCopyWithImpl<$Res, $Val extends RunLastError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -27962,6 +30387,8 @@ class __$$RunLastErrorImplCopyWithImpl<$Res> _$RunLastErrorImpl _value, $Res Function(_$RunLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28012,11 +30439,13 @@ class _$RunLastErrorImpl extends _RunLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => @@ -28039,16 +30468,18 @@ abstract class _RunLastError extends RunLastError { factory _RunLastError.fromJson(Map json) = _$RunLastErrorImpl.fromJson; - @override - /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - RunLastErrorCode get code; @override + RunLastErrorCode get code; /// A human-readable description of the error. 
+ @override String get message; + + /// Create a copy of RunLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunLastErrorImplCopyWith<_$RunLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28066,8 +30497,12 @@ mixin _$RunObjectIncompleteDetails { RunObjectIncompleteDetailsReason? get reason => throw _privateConstructorUsedError; + /// Serializes this RunObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -28097,6 +30532,8 @@ class _$RunObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28137,6 +30574,8 @@ class __$$RunObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$RunObjectIncompleteDetailsImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28184,11 +30623,13 @@ class _$RunObjectIncompleteDetailsImpl extends _RunObjectIncompleteDetails { (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, reason); - @JsonKey(ignore: true) + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> @@ -28215,14 +30656,16 @@ abstract class _RunObjectIncompleteDetails extends RunObjectIncompleteDetails { factory _RunObjectIncompleteDetails.fromJson(Map json) = _$RunObjectIncompleteDetailsImpl.fromJson; - @override - /// The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) RunObjectIncompleteDetailsReason? get reason; + + /// Create a copy of RunObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectIncompleteDetailsImplCopyWith<_$RunObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -28283,6 +30726,8 @@ mixin _$RunObjectToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunObjectToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -28302,6 +30747,9 @@ class _$RunObjectToolChoiceCopyWithImpl<$Res, $Val extends RunObjectToolChoice> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -28324,6 +30772,8 @@ class __$$RunObjectToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28369,11 +30819,13 @@ class _$RunObjectToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceEnumerationImplCopyWith< @@ -28464,7 +30916,10 @@ abstract class RunObjectToolChoiceEnumeration extends RunObjectToolChoice { @override RunObjectToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectToolChoiceEnumerationImplCopyWith< _$RunObjectToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -28495,6 +30950,8 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> $Res Function(_$RunObjectToolChoiceAssistantsNamedToolChoiceImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -28508,6 +30965,8 @@ class __$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl<$Res> )); } + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -28549,11 +31008,13 @@ class _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -28648,7 +31109,10 @@ abstract class RunObjectToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectToolChoice + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$RunObjectToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -28659,8 +31123,8 @@ RunObjectResponseFormat _$RunObjectResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return RunObjectResponseFormatEnumeration.fromJson(json); - case 'format': - return RunObjectResponseFormatAssistantsResponseFormat.fromJson(json); + case 'responseFormat': + return RunObjectResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -28677,45 +31141,46 @@ mixin _$RunObjectResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunObjectResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -28736,6 +31201,9 @@ class _$RunObjectResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -28758,6 +31226,8 @@ class __$$RunObjectResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$RunObjectResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -28804,11 +31274,13 @@ class _$RunObjectResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunObjectResponseFormatEnumerationImplCopyWith< @@ -28820,7 +31292,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -28829,7 +31301,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -28838,7 +31310,7 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -28851,9 +31323,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -28862,8 +31333,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -28872,8 +31343,8 @@ class _$RunObjectResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -28903,55 +31374,58 @@ abstract class RunObjectResponseFormatEnumeration @override RunObjectResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$RunObjectResponseFormatEnumerationImplCopyWith< _$RunObjectResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - factory _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith( - _$RunObjectResponseFormatAssistantsResponseFormatImpl value, - $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) - then) = - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res>; +abstract class _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { + factory _$$RunObjectResponseFormatResponseFormatImplCopyWith( + _$RunObjectResponseFormatResponseFormatImpl value, + $Res Function(_$RunObjectResponseFormatResponseFormatImpl) then) = + __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> +class __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$RunObjectResponseFormatCopyWithImpl<$Res, - _$RunObjectResponseFormatAssistantsResponseFormatImpl> - implements - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith<$Res> { - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$RunObjectResponseFormatAssistantsResponseFormatImpl _value, - $Res Function(_$RunObjectResponseFormatAssistantsResponseFormatImpl) - _then) + _$RunObjectResponseFormatResponseFormatImpl> + implements _$$RunObjectResponseFormatResponseFormatImplCopyWith<$Res> { + __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl( + _$RunObjectResponseFormatResponseFormatImpl _value, + $Res Function(_$RunObjectResponseFormatResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$RunObjectResponseFormatAssistantsResponseFormatImpl( + return _then(_$RunObjectResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -28959,77 +31433,77 @@ class __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunObjectResponseFormatAssistantsResponseFormatImpl - extends RunObjectResponseFormatAssistantsResponseFormat { - const _$RunObjectResponseFormatAssistantsResponseFormatImpl(this.value, +class _$RunObjectResponseFormatResponseFormatImpl + extends RunObjectResponseFormatResponseFormat { + const _$RunObjectResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 
'responseFormat', super._(); - factory _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$RunObjectResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson(json); + _$$RunObjectResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'RunObjectResponseFormat.format(value: $value)'; + return 'RunObjectResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunObjectResponseFormatAssistantsResponseFormatImpl && + other is _$RunObjectResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$RunObjectResponseFormatAssistantsResponseFormatImpl> - get copyWith => - __$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$RunObjectResponseFormatAssistantsResponseFormatImpl>( - this, _$identity); + _$$RunObjectResponseFormatResponseFormatImplCopyWith< + _$RunObjectResponseFormatResponseFormatImpl> + get copyWith => __$$RunObjectResponseFormatResponseFormatImplCopyWithImpl< + _$RunObjectResponseFormatResponseFormatImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(RunObjectResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(RunObjectResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(RunObjectResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -29038,61 +31512,62 @@ class _$RunObjectResponseFormatAssistantsResponseFormatImpl @optionalTypeArgs TResult map({ required TResult Function(RunObjectResponseFormatEnumeration value) mode, - required TResult Function( - RunObjectResponseFormatAssistantsResponseFormat value) - format, + required TResult Function(RunObjectResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(RunObjectResponseFormatEnumeration value)? mode, - TResult? Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(RunObjectResponseFormatResponseFormat value)? 
+ responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(RunObjectResponseFormatEnumeration value)? mode, - TResult Function(RunObjectResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(RunObjectResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( + return _$$RunObjectResponseFormatResponseFormatImplToJson( this, ); } } -abstract class RunObjectResponseFormatAssistantsResponseFormat +abstract class RunObjectResponseFormatResponseFormat extends RunObjectResponseFormat { - const factory RunObjectResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$RunObjectResponseFormatAssistantsResponseFormatImpl; - const RunObjectResponseFormatAssistantsResponseFormat._() : super._(); + const factory RunObjectResponseFormatResponseFormat( + final ResponseFormat value) = _$RunObjectResponseFormatResponseFormatImpl; + const RunObjectResponseFormatResponseFormat._() : super._(); - factory RunObjectResponseFormatAssistantsResponseFormat.fromJson( + factory RunObjectResponseFormatResponseFormat.fromJson( Map json) = - _$RunObjectResponseFormatAssistantsResponseFormatImpl.fromJson; + _$RunObjectResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; - @JsonKey(ignore: true) - _$$RunObjectResponseFormatAssistantsResponseFormatImplCopyWith< - _$RunObjectResponseFormatAssistantsResponseFormatImpl> + ResponseFormat get value; + + /// Create a copy of RunObjectResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunObjectResponseFormatResponseFormatImplCopyWith< + _$RunObjectResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29106,8 +31581,12 @@ mixin _$RunSubmitToolOutputs { @JsonKey(name: 'tool_calls') List get toolCalls => throw _privateConstructorUsedError; + /// Serializes this RunSubmitToolOutputs to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunSubmitToolOutputsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29132,6 +31611,8 @@ class _$RunSubmitToolOutputsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29165,6 +31646,8 @@ class __$$RunSubmitToolOutputsImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputsImpl) _then) : super(_value, _then); + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -29217,12 +31700,14 @@ class _$RunSubmitToolOutputsImpl extends _RunSubmitToolOutputs { .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> @@ -29248,13 +31733,15 @@ abstract class _RunSubmitToolOutputs extends RunSubmitToolOutputs { factory _RunSubmitToolOutputs.fromJson(Map json) = _$RunSubmitToolOutputsImpl.fromJson; - @override - /// A list of the relevant tool calls. + @override @JsonKey(name: 'tool_calls') List get toolCalls; + + /// Create a copy of RunSubmitToolOutputs + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunSubmitToolOutputsImplCopyWith<_$RunSubmitToolOutputsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29277,8 +31764,12 @@ mixin _$RunCompletionUsage { @JsonKey(name: 'total_tokens') int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this RunCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29305,6 +31796,8 @@ class _$RunCompletionUsageCopyWithImpl<$Res, $Val extends RunCompletionUsage> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29351,6 +31844,8 @@ class __$$RunCompletionUsageImplCopyWithImpl<$Res> $Res Function(_$RunCompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29420,12 +31915,14 @@ class _$RunCompletionUsageImpl extends _RunCompletionUsage { other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => @@ -29451,23 +31948,25 @@ abstract class _RunCompletionUsage extends RunCompletionUsage { factory _RunCompletionUsage.fromJson(Map json) = _$RunCompletionUsageImpl.fromJson; - @override - /// Number of completion tokens used over the course of the run. + @override @JsonKey(name: 'completion_tokens') int get completionTokens; - @override /// Number of prompt tokens used over the course of the run. 
+ @override @JsonKey(name: 'prompt_tokens') int get promptTokens; - @override /// Total number of tokens used (prompt + completion). + @override @JsonKey(name: 'total_tokens') int get totalTokens; + + /// Create a copy of RunCompletionUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunCompletionUsageImplCopyWith<_$RunCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -29504,15 +32003,20 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @@ -29541,11 +32045,28 @@ mixin _$CreateRunRequest { CreateRunRequestToolChoice? get toolChoice => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. 
Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat => @@ -29555,8 +32076,12 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this CreateRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -29590,6 +32115,8 @@ abstract class $CreateRunRequestCopyWith<$Res> { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -29611,6 +32138,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29627,6 +32156,7 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -29683,6 +32213,10 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? 
_value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? _value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29694,6 +32228,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ) as $Val); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestModelCopyWith<$Res>? get model { @@ -29706,6 +32242,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -29718,6 +32256,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -29731,6 +32271,8 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> }); } + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -29776,6 +32318,8 @@ abstract class _$$CreateRunRequestImplCopyWith<$Res> @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? responseFormat, @@ -29799,6 +32343,8 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -29815,6 +32361,7 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -29871,6 +32418,10 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? 
_value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -29909,6 +32460,8 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -29971,10 +32524,14 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -29985,12 +32542,15 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -30022,11 +32582,29 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. 
+ @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -30039,7 +32617,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @override String toString() { - return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -30069,12 +32647,14 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -30091,10 +32671,13 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat, stream); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => @@ -30133,6 +32716,8 @@ abstract class _CreateRunRequest extends CreateRunRequest { @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateRunRequestResponseFormat? responseFormat, @@ -30143,96 +32728,121 @@ abstract class _CreateRunRequest extends CreateRunRequest { factory _CreateRunRequest.fromJson(Map json) = _$CreateRunRequestImpl.fromJson; - @override - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + @override @_CreateRunRequestModelConverter() @JsonKey(includeIfNull: false) CreateRunRequestModel? 
get model; - @override /// Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override /// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + @override @JsonKey(name: 'additional_instructions', includeIfNull: false) String? get additionalInstructions; - @override /// Adds additional messages to the thread before creating the run. + @override @JsonKey(name: 'additional_messages', includeIfNull: false) List? get additionalMessages; - @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) List? get tools; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; - @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
+ @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_CreateRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateRunRequestToolChoice? get toolChoice; + + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+ @override @_CreateRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateRunRequestResponseFormat? get responseFormat; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of CreateRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestImplCopyWith<_$CreateRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30240,9 +32850,9 @@ abstract class _CreateRunRequest extends CreateRunRequest { CreateRunRequestModel _$CreateRunRequestModelFromJson( Map json) { switch (json['runtimeType']) { - case 'enumeration': + case 'model': return CreateRunRequestModelEnumeration.fromJson(json); - case 'string': + case 'modelId': return CreateRunRequestModelString.fromJson(json); default: @@ -30259,43 +32869,44 @@ mixin _$CreateRunRequestModel { Object get value => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -30316,6 +32927,9 @@ class _$CreateRunRequestModelCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. 
} /// @nodoc @@ -30338,6 +32952,8 @@ class __$$CreateRunRequestModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30358,7 +32974,7 @@ class _$CreateRunRequestModelEnumerationImpl extends CreateRunRequestModelEnumeration { const _$CreateRunRequestModelEnumerationImpl(this.value, {final String? $type}) - : $type = $type ?? 'enumeration', + : $type = $type ?? 'model', super._(); factory _$CreateRunRequestModelEnumerationImpl.fromJson( @@ -30373,7 +32989,7 @@ class _$CreateRunRequestModelEnumerationImpl @override String toString() { - return 'CreateRunRequestModel.enumeration(value: $value)'; + return 'CreateRunRequestModel.model(value: $value)'; } @override @@ -30384,11 +33000,13 @@ class _$CreateRunRequestModelEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelEnumerationImplCopyWith< @@ -30399,30 +33017,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) { - return enumeration(value); + return model(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) { - return enumeration?.call(value); + return model?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? modelId, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(value); + if (model != null) { + return model(value); } return orElse(); } @@ -30430,31 +33048,30 @@ class _$CreateRunRequestModelEnumerationImpl @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) { - return enumeration(this); + return model(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) { - return enumeration?.call(this); + return model?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? 
enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) { - if (enumeration != null) { - return enumeration(this); + if (model != null) { + return model(this); } return orElse(); } @@ -30477,7 +33094,10 @@ abstract class CreateRunRequestModelEnumeration extends CreateRunRequestModel { @override RunModels get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestModelEnumerationImplCopyWith< _$CreateRunRequestModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -30503,6 +33123,8 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestModelStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30521,7 +33143,7 @@ class __$$CreateRunRequestModelStringImplCopyWithImpl<$Res> @JsonSerializable() class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { const _$CreateRunRequestModelStringImpl(this.value, {final String? $type}) - : $type = $type ?? 'string', + : $type = $type ?? 'modelId', super._(); factory _$CreateRunRequestModelStringImpl.fromJson( @@ -30536,7 +33158,7 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override String toString() { - return 'CreateRunRequestModel.string(value: $value)'; + return 'CreateRunRequestModel.modelId(value: $value)'; } @override @@ -30547,11 +33169,13 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> @@ -30561,30 +33185,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult when({ - required TResult Function(RunModels value) enumeration, - required TResult Function(String value) string, + required TResult Function(RunModels value) model, + required TResult Function(String value) modelId, }) { - return string(value); + return modelId(value); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(RunModels value)? enumeration, - TResult? Function(String value)? string, + TResult? Function(RunModels value)? model, + TResult? Function(String value)? modelId, }) { - return string?.call(value); + return modelId?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(RunModels value)? enumeration, - TResult Function(String value)? string, + TResult Function(RunModels value)? model, + TResult Function(String value)? 
modelId, required TResult orElse(), }) { - if (string != null) { - return string(value); + if (modelId != null) { + return modelId(value); } return orElse(); } @@ -30592,31 +33216,30 @@ class _$CreateRunRequestModelStringImpl extends CreateRunRequestModelString { @override @optionalTypeArgs TResult map({ - required TResult Function(CreateRunRequestModelEnumeration value) - enumeration, - required TResult Function(CreateRunRequestModelString value) string, + required TResult Function(CreateRunRequestModelEnumeration value) model, + required TResult Function(CreateRunRequestModelString value) modelId, }) { - return string(this); + return modelId(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult? Function(CreateRunRequestModelString value)? string, + TResult? Function(CreateRunRequestModelEnumeration value)? model, + TResult? Function(CreateRunRequestModelString value)? modelId, }) { - return string?.call(this); + return modelId?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(CreateRunRequestModelEnumeration value)? enumeration, - TResult Function(CreateRunRequestModelString value)? string, + TResult Function(CreateRunRequestModelEnumeration value)? model, + TResult Function(CreateRunRequestModelString value)? modelId, required TResult orElse(), }) { - if (string != null) { - return string(this); + if (modelId != null) { + return modelId(this); } return orElse(); } @@ -30639,7 +33262,10 @@ abstract class CreateRunRequestModelString extends CreateRunRequestModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestModelStringImplCopyWith<_$CreateRunRequestModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -30707,6 +33333,8 @@ mixin _$CreateRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -30728,6 +33356,9 @@ class _$CreateRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -30750,6 +33381,8 @@ class __$$CreateRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30796,11 +33429,13 @@ class _$CreateRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< @@ -30896,7 +33531,10 @@ abstract class CreateRunRequestToolChoiceEnumeration @override CreateRunRequestToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestToolChoiceEnumerationImplCopyWith< _$CreateRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -30932,6 +33570,8 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -30945,6 +33585,8 @@ class __$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWithImpl< )); } + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -30987,11 +33629,13 @@ class _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -31089,7 +33733,10 @@ abstract class CreateRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -31100,9 +33747,8 @@ CreateRunRequestResponseFormat _$CreateRunRequestResponseFormatFromJson( switch (json['runtimeType']) { case 'mode': return CreateRunRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( - json); + case 'responseFormat': + return CreateRunRequestResponseFormatResponseFormat.fromJson(json); default: throw CheckedFromJsonException( @@ -31119,19 +33765,19 @@ mixin _$CreateRunRequestResponseFormat { @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? 
mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -31140,27 +33786,27 @@ mixin _$CreateRunRequestResponseFormat { required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateRunRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -31183,6 +33829,9 @@ class _$CreateRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -31205,6 +33854,8 @@ class __$$CreateRunRequestResponseFormatEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateRunRequestResponseFormatEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31251,11 +33902,13 @@ class _$CreateRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< @@ -31269,7 +33922,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -31278,7 +33931,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -31287,7 +33940,7 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? 
mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -31302,8 +33955,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -31312,9 +33965,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -31323,9 +33975,8 @@ class _$CreateRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -31355,60 +34006,61 @@ abstract class CreateRunRequestResponseFormatEnumeration @override CreateRunRequestResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateRunRequestResponseFormatEnumerationImplCopyWith< _$CreateRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl value, - $Res Function( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) + factory _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith( + _$CreateRunRequestResponseFormatResponseFormatImpl value, + $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) then) = - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res>; + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - $Res> +class __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl<$Res> extends _$CreateRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateRunRequestResponseFormatResponseFormatImpl> implements - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - $Res> { - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl _value, - $Res Function( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl) - _then) + 
_$$CreateRunRequestResponseFormatResponseFormatImplCopyWith<$Res> { + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateRunRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateRunRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? value = null, }) { - return _then(_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateRunRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -31416,79 +34068,79 @@ class __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl /// @nodoc @JsonSerializable() -class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - extends CreateRunRequestResponseFormatAssistantsResponseFormat { - const _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl(this.value, +class _$CreateRunRequestResponseFormatResponseFormatImpl + extends CreateRunRequestResponseFormatResponseFormat { + const _$CreateRunRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( - json); + _$$CreateRunRequestResponseFormatResponseFormatImplFromJson(json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateRunRequestResponseFormat.format(value: $value)'; + return 'CreateRunRequestResponseFormat.responseFormat(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl && + other is _$CreateRunRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateRunRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateRunRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function(CreateRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -31499,62 +34151,63 @@ class _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl required TResult Function(CreateRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value) - format, + CreateRunRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @optionalTypeArgs TResult? mapOrNull({ TResult? Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult? Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult? Function(CreateRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @optionalTypeArgs TResult maybeMap({ TResult Function(CreateRunRequestResponseFormatEnumeration value)? mode, - TResult Function( - CreateRunRequestResponseFormatAssistantsResponseFormat value)? - format, + TResult Function(CreateRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateRunRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateRunRequestResponseFormatAssistantsResponseFormat +abstract class CreateRunRequestResponseFormatResponseFormat extends CreateRunRequestResponseFormat { - const factory CreateRunRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl; - const CreateRunRequestResponseFormatAssistantsResponseFormat._() : super._(); + const factory CreateRunRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateRunRequestResponseFormatResponseFormatImpl; + const CreateRunRequestResponseFormatResponseFormat._() : super._(); - factory CreateRunRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateRunRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson; + _$CreateRunRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; - @JsonKey(ignore: true) - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl> + ResponseFormat get value; + + /// Create a copy of CreateRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateRunRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31582,8 +34235,12 @@ mixin _$ListRunsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListRunsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListRunsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -31612,6 +34269,8 @@ class _$ListRunsResponseCopyWithImpl<$Res, $Val extends ListRunsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31670,6 +34329,8 @@ class __$$ListRunsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31766,12 +34427,14 @@ class _$ListRunsResponseImpl extends _ListRunsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => @@ -31799,31 +34462,33 @@ abstract class _ListRunsResponse extends ListRunsResponse { factory _ListRunsResponse.fromJson(Map json) = _$ListRunsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of runs. - List get data; @override + List get data; /// The ID of the first run in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last run in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more runs to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListRunsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListRunsResponseImplCopyWith<_$ListRunsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -31834,12 +34499,18 @@ ModifyRunRequest _$ModifyRunRequestFromJson(Map json) { /// @nodoc mixin _$ModifyRunRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -31863,6 +34534,8 @@ class _$ModifyRunRequestCopyWithImpl<$Res, $Val extends ModifyRunRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31896,6 +34569,8 @@ class __$$ModifyRunRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -31921,10 +34596,14 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { factory _$ModifyRunRequestImpl.fromJson(Map json) => _$$ModifyRunRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -31948,12 +34627,14 @@ class _$ModifyRunRequestImpl extends _ModifyRunRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => @@ -31977,13 +34658,17 @@ abstract class _ModifyRunRequest extends ModifyRunRequest { factory _ModifyRunRequest.fromJson(Map json) = _$ModifyRunRequestImpl.fromJson; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyRunRequestImplCopyWith<_$ModifyRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32004,8 +34689,12 @@ mixin _$SubmitToolOutputsRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this SubmitToolOutputsRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $SubmitToolOutputsRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32034,6 +34723,8 @@ class _$SubmitToolOutputsRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -32077,6 +34768,8 @@ class __$$SubmitToolOutputsRunRequestImplCopyWithImpl<$Res> $Res Function(_$SubmitToolOutputsRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32142,12 +34835,14 @@ class _$SubmitToolOutputsRunRequestImpl extends _SubmitToolOutputsRunRequest { (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_toolOutputs), stream); - @JsonKey(ignore: true) + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> @@ -32174,18 +34869,20 @@ abstract class _SubmitToolOutputsRunRequest factory _SubmitToolOutputsRunRequest.fromJson(Map json) = _$SubmitToolOutputsRunRequestImpl.fromJson; - @override - /// A list of tools for which the outputs are being submitted. + @override @JsonKey(name: 'tool_outputs') List get toolOutputs; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of SubmitToolOutputsRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$SubmitToolOutputsRunRequestImplCopyWith<_$SubmitToolOutputsRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32204,8 +34901,12 @@ mixin _$RunSubmitToolOutput { @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; + /// Serializes this RunSubmitToolOutput to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunSubmitToolOutputCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32231,6 +34932,8 @@ class _$RunSubmitToolOutputCopyWithImpl<$Res, $Val extends RunSubmitToolOutput> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32271,6 +34974,8 @@ class __$$RunSubmitToolOutputImplCopyWithImpl<$Res> $Res Function(_$RunSubmitToolOutputImpl) _then) : super(_value, _then); + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -32326,11 +35031,13 @@ class _$RunSubmitToolOutputImpl extends _RunSubmitToolOutput { (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, toolCallId, output); - @JsonKey(ignore: true) + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => @@ -32356,18 +35063,20 @@ abstract class _RunSubmitToolOutput extends RunSubmitToolOutput { factory _RunSubmitToolOutput.fromJson(Map json) = _$RunSubmitToolOutputImpl.fromJson; - @override - /// The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + @override @JsonKey(name: 'tool_call_id', includeIfNull: false) String? get toolCallId; - @override /// The output of the tool call to be submitted to continue the run. + @override @JsonKey(includeIfNull: false) String? get output; + + /// Create a copy of RunSubmitToolOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunSubmitToolOutputImplCopyWith<_$RunSubmitToolOutputImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32387,8 +35096,12 @@ mixin _$RunToolCallObject { /// The function definition. RunToolCallFunction get function => throw _privateConstructorUsedError; + /// Serializes this RunToolCallObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunToolCallObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32415,6 +35128,8 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32438,6 +35153,8 @@ class _$RunToolCallObjectCopyWithImpl<$Res, $Val extends RunToolCallObject> ) as $Val); } + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunToolCallFunctionCopyWith<$Res> get function { @@ -32470,6 +35187,8 @@ class __$$RunToolCallObjectImplCopyWithImpl<$Res> $Res Function(_$RunToolCallObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32532,11 +35251,13 @@ class _$RunToolCallObjectImpl extends _RunToolCallObject { other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => @@ -32561,20 +35282,22 @@ abstract class _RunToolCallObject extends RunToolCallObject { factory _RunToolCallObject.fromJson(Map json) = _$RunToolCallObjectImpl.fromJson; - @override - /// The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint. - String get id; @override + String get id; /// The type of tool call the output is required for. For now, this is always `function`. - RunToolCallObjectType get type; @override + RunToolCallObjectType get type; /// The function definition. + @override RunToolCallFunction get function; + + /// Create a copy of RunToolCallObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunToolCallObjectImplCopyWith<_$RunToolCallObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32591,8 +35314,12 @@ mixin _$RunToolCallFunction { /// The arguments that the model expects you to pass to the function. String get arguments => throw _privateConstructorUsedError; + /// Serializes this RunToolCallFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunToolCallFunctionCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32616,6 +35343,8 @@ class _$RunToolCallFunctionCopyWithImpl<$Res, $Val extends RunToolCallFunction> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32654,6 +35383,8 @@ class __$$RunToolCallFunctionImplCopyWithImpl<$Res> $Res Function(_$RunToolCallFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32705,11 +35436,13 @@ class _$RunToolCallFunctionImpl extends _RunToolCallFunction { other.arguments == arguments)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments); - @JsonKey(ignore: true) + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => @@ -32733,16 +35466,18 @@ abstract class _RunToolCallFunction extends RunToolCallFunction { factory _RunToolCallFunction.fromJson(Map json) = _$RunToolCallFunctionImpl.fromJson; - @override - /// The name of the function. - String get name; @override + String get name; /// The arguments that the model expects you to pass to the function. + @override String get arguments; + + /// Create a copy of RunToolCallFunction + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunToolCallFunctionImplCopyWith<_$RunToolCallFunctionImpl> get copyWith => throw _privateConstructorUsedError; } @@ -32779,15 +35514,20 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @JsonKey(name: 'top_p', includeIfNull: false) @@ -32816,11 +35556,28 @@ mixin _$CreateThreadAndRunRequest { CreateThreadAndRunRequestToolChoice? get toolChoice => throw _privateConstructorUsedError; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls => throw _privateConstructorUsedError; + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
/// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat => @@ -32830,8 +35587,12 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) bool? get stream => throw _privateConstructorUsedError; + /// Serializes this CreateThreadAndRunRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateThreadAndRunRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -32864,6 +35625,8 @@ abstract class $CreateThreadAndRunRequestCopyWith<$Res> { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -32888,6 +35651,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -32904,6 +35669,7 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -32960,6 +35726,10 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? 
_value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -32971,6 +35741,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadRequestCopyWith<$Res>? get thread { @@ -32983,6 +35755,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadAndRunModelCopyWith<$Res>? get model { @@ -32995,6 +35769,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -33007,6 +35783,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TruncationObjectCopyWith<$Res>? get truncationStrategy { @@ -33019,6 +35797,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestToolChoiceCopyWith<$Res>? get toolChoice { @@ -33032,6 +35812,8 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, }); } + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateThreadAndRunRequestResponseFormatCopyWith<$Res>? get responseFormat { @@ -33077,6 +35859,8 @@ abstract class _$$CreateThreadAndRunRequestImplCopyWith<$Res> @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -33106,6 +35890,8 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33122,6 +35908,7 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, Object? toolChoice = freezed, + Object? parallelToolCalls = freezed, Object? responseFormat = freezed, Object? stream = freezed, }) { @@ -33178,6 +35965,10 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> ? _value.toolChoice : toolChoice // ignore: cast_nullable_to_non_nullable as CreateThreadAndRunRequestToolChoice?, + parallelToolCalls: freezed == parallelToolCalls + ? _value.parallelToolCalls + : parallelToolCalls // ignore: cast_nullable_to_non_nullable + as bool?, responseFormat: freezed == responseFormat ? 
_value.responseFormat : responseFormat // ignore: cast_nullable_to_non_nullable @@ -33212,6 +36003,8 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) this.toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + this.parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) this.responseFormat, @@ -33263,10 +36056,14 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -33277,12 +36074,15 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { return EqualUnmodifiableMapView(value); } - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. @override @JsonKey(includeIfNull: false) final double? temperature; - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. @override @@ -33314,11 +36114,29 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. + @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls; + + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
@override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) @@ -33331,7 +36149,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @override String toString() { - return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, toolResources: $toolResources, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, parallelToolCalls: $parallelToolCalls, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -33360,12 +36178,14 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { other.truncationStrategy == truncationStrategy) && (identical(other.toolChoice, toolChoice) || other.toolChoice == toolChoice) && + (identical(other.parallelToolCalls, parallelToolCalls) || + other.parallelToolCalls == parallelToolCalls) && (identical(other.responseFormat, responseFormat) || other.responseFormat == responseFormat) && (identical(other.stream, stream) || other.stream == stream)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -33382,10 +36202,13 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { maxCompletionTokens, truncationStrategy, toolChoice, + parallelToolCalls, responseFormat, stream); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> @@ -33423,6 +36246,8 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) final CreateThreadAndRunRequestToolChoice? toolChoice, + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + final bool? parallelToolCalls, @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) final CreateThreadAndRunRequestResponseFormat? responseFormat, @@ -33433,96 +36258,121 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { factory _CreateThreadAndRunRequest.fromJson(Map json) = _$CreateThreadAndRunRequestImpl.fromJson; - @override - /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// If no thread is provided, an empty thread will be created. + @override @JsonKey(includeIfNull: false) CreateThreadRequest? get thread; - @override /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. 
If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + @override @_ThreadAndRunModelConverter() @JsonKey(includeIfNull: false) ThreadAndRunModel? get model; - @override /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) String? get instructions; - @override /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + @override @JsonKey(includeIfNull: false) List? get tools; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; - @override - /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + @override @JsonKey(includeIfNull: false) double? get temperature; - @override - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + /// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + /// mass are considered. /// /// We generally recommend altering this or temperature but not both. + @override @JsonKey(name: 'top_p', includeIfNull: false) double? get topP; - @override /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; - @override /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. 
See `incomplete_details` for more info. + @override @JsonKey(name: 'max_completion_tokens', includeIfNull: false) int? get maxCompletionTokens; - @override /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + @override @JsonKey(name: 'truncation_strategy', includeIfNull: false) TruncationObject? get truncationStrategy; - @override /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tools and instead generates a message. /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools. /// `required` means the model must call one or more tools before responding to the user. /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + @override @_CreateThreadAndRunRequestToolChoiceConverter() @JsonKey(name: 'tool_choice', includeIfNull: false) CreateThreadAndRunRequestToolChoice? get toolChoice; + + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + /// during tool use. @override + @JsonKey(name: 'parallel_tool_calls', includeIfNull: false) + bool? get parallelToolCalls; - /// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + /// Specifies the format that the model must output. Compatible with + /// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + /// since `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + /// the model will match your supplied JSON schema. Learn more in the + /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + /// is valid JSON. /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + /// system or user message. Without this, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + /// that the message content may be partially cut off if `finish_reason="length"`, which indicates the + /// generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+ @override @_CreateThreadAndRunRequestResponseFormatConverter() @JsonKey(name: 'response_format', includeIfNull: false) CreateThreadAndRunRequestResponseFormat? get responseFormat; - @override /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + @override @JsonKey(includeIfNull: false) bool? get stream; + + /// Create a copy of CreateThreadAndRunRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestImplCopyWith<_$CreateThreadAndRunRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33581,6 +36431,8 @@ mixin _$ThreadAndRunModel { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ThreadAndRunModel to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -33600,6 +36452,9 @@ class _$ThreadAndRunModelCopyWithImpl<$Res, $Val extends ThreadAndRunModel> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -33622,6 +36477,8 @@ class __$$ThreadAndRunModelEnumerationImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33666,11 +36523,13 @@ class _$ThreadAndRunModelEnumerationImpl extends ThreadAndRunModelEnumeration { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelEnumerationImplCopyWith< @@ -33758,7 +36617,10 @@ abstract class ThreadAndRunModelEnumeration extends ThreadAndRunModel { @override ThreadAndRunModels get value; - @JsonKey(ignore: true) + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadAndRunModelEnumerationImplCopyWith< _$ThreadAndRunModelEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -33783,6 +36645,8 @@ class __$$ThreadAndRunModelStringImplCopyWithImpl<$Res> $Res Function(_$ThreadAndRunModelStringImpl) _then) : super(_value, _then); + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -33826,11 +36690,13 @@ class _$ThreadAndRunModelStringImpl extends ThreadAndRunModelString { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> @@ -33917,7 +36783,10 @@ abstract class ThreadAndRunModelString extends ThreadAndRunModel { @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of ThreadAndRunModel + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadAndRunModelStringImplCopyWith<_$ThreadAndRunModelStringImpl> get copyWith => throw _privateConstructorUsedError; } @@ -33992,6 +36861,8 @@ mixin _$CreateThreadAndRunRequestToolChoice { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateThreadAndRunRequestToolChoice to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -34014,6 +36885,9 @@ class _$CreateThreadAndRunRequestToolChoiceCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -34039,6 +36913,8 @@ class __$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWithImpl<$Res> $Res Function(_$CreateThreadAndRunRequestToolChoiceEnumerationImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34085,11 +36961,13 @@ class _$CreateThreadAndRunRequestToolChoiceEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< @@ -34193,7 +37071,10 @@ abstract class CreateThreadAndRunRequestToolChoiceEnumeration @override CreateThreadAndRunRequestToolChoiceMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestToolChoiceEnumerationImplCopyWith< _$CreateThreadAndRunRequestToolChoiceEnumerationImpl> get copyWith => throw _privateConstructorUsedError; @@ -34230,6 +37111,8 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34244,6 +37127,8 @@ class __$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWi )); } + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $AssistantsNamedToolChoiceCopyWith<$Res> get value { @@ -34288,11 +37173,13 @@ class _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< @@ -34398,7 +37285,10 @@ abstract class CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice @override AssistantsNamedToolChoice get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestToolChoice + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImplCopyWith< _$CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoiceImpl> get copyWith => throw _privateConstructorUsedError; @@ -34410,9 +37300,9 @@ CreateThreadAndRunRequestResponseFormat switch (json['runtimeType']) { case 'mode': return CreateThreadAndRunRequestResponseFormatEnumeration.fromJson(json); - case 'format': - return CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - .fromJson(json); + case 'responseFormat': + return CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( + json); default: throw CheckedFromJsonException( @@ -34430,19 +37320,19 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; @@ -34452,9 +37342,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -34462,9 +37351,8 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, }) => throw _privateConstructorUsedError; @optionalTypeArgs @@ -34472,12 +37360,13 @@ mixin _$CreateThreadAndRunRequestResponseFormat { TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateThreadAndRunRequestResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -34501,6 +37390,9 @@ class _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -34531,6 +37423,8 @@ class __$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWithImpl< _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -34577,11 +37471,13 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< @@ -34596,7 +37492,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { return mode(value); } @@ -34605,7 +37501,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { return mode?.call(value); } @@ -34614,7 +37510,7 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -34630,9 +37526,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) { return mode(this); } @@ -34643,9 +37538,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? 
Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { return mode?.call(this); } @@ -34656,9 +37550,8 @@ class _$CreateThreadAndRunRequestResponseFormatEnumerationImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, required TResult orElse(), }) { if (mode != null) { @@ -34688,63 +37581,66 @@ abstract class CreateThreadAndRunRequestResponseFormatEnumeration @override CreateThreadAndRunRequestResponseFormatMode get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadAndRunRequestResponseFormatEnumerationImplCopyWith< _$CreateThreadAndRunRequestResponseFormatEnumerationImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< +abstract class _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - factory _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - value, + factory _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl value, $Res Function( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) then) = - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< $Res>; @useResult - $Res call({AssistantsResponseFormat value}); + $Res call({ResponseFormat value}); - $AssistantsResponseFormatCopyWith<$Res> get value; + $ResponseFormatCopyWith<$Res> get value; } /// @nodoc -class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< +class __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< $Res> extends _$CreateThreadAndRunRequestResponseFormatCopyWithImpl<$Res, - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> implements - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< $Res> { - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - _value, - $Res Function( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl) + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl _value, + $Res Function(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? 
value = null, }) { - return _then( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( + return _then(_$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( null == value ? _value.value : value // ignore: cast_nullable_to_non_nullable - as AssistantsResponseFormat, + as ResponseFormat, )); } + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $AssistantsResponseFormatCopyWith<$Res> get value { - return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + $ResponseFormatCopyWith<$Res> get value { + return $ResponseFormatCopyWith<$Res>(_value.value, (value) { return _then(_value.copyWith(value: value)); }); } @@ -34752,28 +37648,27 @@ class __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCop /// @nodoc @JsonSerializable() -class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - extends CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat { - const _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( - this.value, +class _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl + extends CreateThreadAndRunRequestResponseFormatResponseFormat { + const _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl(this.value, {final String? $type}) - : $type = $type ?? 'format', + : $type = $type ?? 'responseFormat', super._(); - factory _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + factory _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson( Map json) => - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplFromJson( + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplFromJson( json); @override - final AssistantsResponseFormat value; + final ResponseFormat value; @JsonKey(name: 'runtimeType') final String $type; @override String toString() { - return 'CreateThreadAndRunRequestResponseFormat.format(value: $value)'; + return 'CreateThreadAndRunRequestResponseFormat.responseFormat(value: $value)'; } @override @@ -34781,22 +37676,24 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl return identical(this, other) || (other.runtimeType == runtimeType && other - is _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl && + is _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl && (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> get copyWith => - __$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl>( + __$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWithImpl< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl>( this, _$identity); @override @@ -34804,29 +37701,29 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult when({ required TResult Function(CreateThreadAndRunRequestResponseFormatMode value) mode, - required TResult Function(AssistantsResponseFormat value) format, + required TResult Function(ResponseFormat value) responseFormat, }) { - return format(value); + return responseFormat(value); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult? Function(AssistantsResponseFormat value)? format, + TResult? Function(ResponseFormat value)? responseFormat, }) { - return format?.call(value); + return responseFormat?.call(value); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function(CreateThreadAndRunRequestResponseFormatMode value)? mode, - TResult Function(AssistantsResponseFormat value)? format, + TResult Function(ResponseFormat value)? responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(value); + if (responseFormat != null) { + return responseFormat(value); } return orElse(); } @@ -34838,11 +37735,10 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl CreateThreadAndRunRequestResponseFormatEnumeration value) mode, required TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value) - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value) + responseFormat, }) { - return format(this); + return responseFormat(this); } @override @@ -34851,11 +37747,10 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult? Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult? Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? + responseFormat, }) { - return format?.call(this); + return responseFormat?.call(this); } @override @@ -34864,43 +37759,43 @@ class _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl TResult Function(CreateThreadAndRunRequestResponseFormatEnumeration value)? mode, TResult Function( - CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat - value)? - format, + CreateThreadAndRunRequestResponseFormatResponseFormat value)? 
+ responseFormat, required TResult orElse(), }) { - if (format != null) { - return format(this); + if (responseFormat != null) { + return responseFormat(this); } return orElse(); } @override Map toJson() { - return _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( + return _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( this, ); } } -abstract class CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat +abstract class CreateThreadAndRunRequestResponseFormatResponseFormat extends CreateThreadAndRunRequestResponseFormat { - const factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat( - final AssistantsResponseFormat value) = - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl; - const CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat._() - : super._(); + const factory CreateThreadAndRunRequestResponseFormatResponseFormat( + final ResponseFormat value) = + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl; + const CreateThreadAndRunRequestResponseFormatResponseFormat._() : super._(); - factory CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat.fromJson( + factory CreateThreadAndRunRequestResponseFormatResponseFormat.fromJson( Map json) = - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - .fromJson; + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl.fromJson; @override - AssistantsResponseFormat get value; - @JsonKey(ignore: true) - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplCopyWith< - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl> + ResponseFormat get value; + + /// Create a copy of CreateThreadAndRunRequestResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplCopyWith< + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -34924,11 +37819,17 @@ mixin _$ThreadObject { @JsonKey(name: 'tool_resources') ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ThreadObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ThreadObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -34959,6 +37860,8 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. 
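// A minimal sketch of consuming the response-format union after the `format`
// case is renamed to `responseFormat` above (same openai_dart import as the
// earlier sketch). The class and callback names appear in this diff; the
// handler bodies are placeholders.
String describeResponseFormat(CreateThreadAndRunRequestResponseFormat format) {
  return format.when(
    mode: (CreateThreadAndRunRequestResponseFormatMode mode) =>
        'predefined mode: $mode',
    responseFormat: (ResponseFormat value) => 'explicit format: $value',
  );
}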
@pragma('vm:prefer-inline') @override $Res call({ @@ -34992,6 +37895,8 @@ class _$ThreadObjectCopyWithImpl<$Res, $Val extends ThreadObject> ) as $Val); } + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35032,6 +37937,8 @@ class __$$ThreadObjectImplCopyWithImpl<$Res> _$ThreadObjectImpl _value, $Res Function(_$ThreadObjectImpl) _then) : super(_value, _then); + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35099,10 +38006,14 @@ class _$ThreadObjectImpl extends _ThreadObject { @JsonKey(name: 'tool_resources') final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -35131,12 +38042,14 @@ class _$ThreadObjectImpl extends _ThreadObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, createdAt, toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => @@ -35163,30 +38076,34 @@ abstract class _ThreadObject extends ThreadObject { factory _ThreadObject.fromJson(Map json) = _$ThreadObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread`. - ThreadObjectObject get object; @override + ThreadObjectObject get object; /// The Unix timestamp (in seconds) for when the thread was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources') ToolResources? 
get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override Map? get metadata; + + /// Create a copy of ThreadObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadObjectImplCopyWith<_$ThreadObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35206,12 +38123,18 @@ mixin _$CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $CreateThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35241,6 +38164,8 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35264,6 +38189,8 @@ class _$CreateThreadRequestCopyWithImpl<$Res, $Val extends CreateThreadRequest> ) as $Val); } + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35303,6 +38230,8 @@ class __$$CreateThreadRequestImplCopyWithImpl<$Res> $Res Function(_$CreateThreadRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35361,10 +38290,14 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -35391,7 +38324,7 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -35399,7 +38332,9 @@ class _$CreateThreadRequestImpl extends _CreateThreadRequest { toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => @@ -35427,23 +38362,27 @@ abstract class _CreateThreadRequest extends CreateThreadRequest { factory _CreateThreadRequest.fromJson(Map json) = _$CreateThreadRequestImpl.fromJson; - @override - /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. + @override @JsonKey(includeIfNull: false) List? get messages; - @override /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateThreadRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateThreadRequestImplCopyWith<_$CreateThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35458,12 +38397,18 @@ mixin _$ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyThreadRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyThreadRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35492,6 +38437,8 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35510,6 +38457,8 @@ class _$ModifyThreadRequestCopyWithImpl<$Res, $Val extends ModifyThreadRequest> ) as $Val); } + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCopyWith<$Res>? get toolResources { @@ -35548,6 +38497,8 @@ class __$$ModifyThreadRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyThreadRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35585,10 +38536,14 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { @JsonKey(name: 'tool_resources', includeIfNull: false) final ToolResources? toolResources; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? 
get metadata { @@ -35614,12 +38569,14 @@ class _$ModifyThreadRequestImpl extends _ModifyThreadRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, toolResources, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => @@ -35645,18 +38602,22 @@ abstract class _ModifyThreadRequest extends ModifyThreadRequest { factory _ModifyThreadRequest.fromJson(Map json) = _$ModifyThreadRequestImpl.fromJson; - @override - /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + @override @JsonKey(name: 'tool_resources', includeIfNull: false) ToolResources? get toolResources; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyThreadRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyThreadRequestImplCopyWith<_$ModifyThreadRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35676,8 +38637,12 @@ mixin _$ToolResources { @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch => throw _privateConstructorUsedError; + /// Serializes this ToolResources to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35708,6 +38673,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35726,6 +38693,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> ) as $Val); } + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesCodeInterpreterCopyWith<$Res>? 
get codeInterpreter { @@ -35739,6 +38708,8 @@ class _$ToolResourcesCopyWithImpl<$Res, $Val extends ToolResources> }); } + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ToolResourcesFileSearchCopyWith<$Res>? get fileSearch { @@ -35780,6 +38751,8 @@ class __$$ToolResourcesImplCopyWithImpl<$Res> _$ToolResourcesImpl _value, $Res Function(_$ToolResourcesImpl) _then) : super(_value, _then); + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35837,11 +38810,13 @@ class _$ToolResourcesImpl extends _ToolResources { other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, codeInterpreter, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => @@ -35866,18 +38841,20 @@ abstract class _ToolResources extends ToolResources { factory _ToolResources.fromJson(Map json) = _$ToolResourcesImpl.fromJson; - @override - /// No Description + @override @JsonKey(name: 'code_interpreter', includeIfNull: false) ToolResourcesCodeInterpreter? get codeInterpreter; - @override /// No Description + @override @JsonKey(name: 'file_search', includeIfNull: false) ToolResourcesFileSearch? get fileSearch; + + /// Create a copy of ToolResources + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesImplCopyWith<_$ToolResourcesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -35893,8 +38870,12 @@ mixin _$ToolResourcesCodeInterpreter { @JsonKey(name: 'file_ids') List get fileIds => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesCodeInterpreterCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -35921,6 +38902,8 @@ class _$ToolResourcesCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -35957,6 +38940,8 @@ class __$$ToolResourcesCodeInterpreterImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesCodeInterpreterImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -36008,12 +38993,14 @@ class _$ToolResourcesCodeInterpreterImpl extends _ToolResourcesCodeInterpreter { const DeepCollectionEquality().equals(other._fileIds, _fileIds)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesCodeInterpreterImplCopyWith< @@ -36039,13 +39026,15 @@ abstract class _ToolResourcesCodeInterpreter factory _ToolResourcesCodeInterpreter.fromJson(Map json) = _$ToolResourcesCodeInterpreterImpl.fromJson; - @override - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + @override @JsonKey(name: 'file_ids') List get fileIds; + + /// Create a copy of ToolResourcesCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesCodeInterpreterImplCopyWith< _$ToolResourcesCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -36067,8 +39056,12 @@ mixin _$ToolResourcesFileSearch { List? get vectorStores => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesFileSearchCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36097,6 +39090,8 @@ class _$ToolResourcesFileSearchCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36142,6 +39137,8 @@ class __$$ToolResourcesFileSearchImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36220,14 +39217,16 @@ class _$ToolResourcesFileSearchImpl extends _ToolResourcesFileSearch { .equals(other._vectorStores, _vectorStores)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_vectorStoreIds), const DeepCollectionEquality().hash(_vectorStores)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. 
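// A minimal sketch of wiring the thread tool resources described above:
// file IDs for `code_interpreter` and vector store IDs for `file_search`.
// The type and field names come from this diff; the constructors' exact
// signatures and the example IDs are assumptions.
final exampleToolResources = ToolResources(
  codeInterpreter: ToolResourcesCodeInterpreter(
    fileIds: ['file_abc123'], // hypothetical file ID
  ),
  fileSearch: ToolResourcesFileSearch(
    vectorStoreIds: ['vs_abc123'], // hypothetical vector store ID
  ),
);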
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> @@ -36254,18 +39253,20 @@ abstract class _ToolResourcesFileSearch extends ToolResourcesFileSearch { factory _ToolResourcesFileSearch.fromJson(Map json) = _$ToolResourcesFileSearchImpl.fromJson; - @override - /// The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + @override @JsonKey(name: 'vector_store_ids', includeIfNull: false) List? get vectorStoreIds; - @override /// A helper to create a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. + @override @JsonKey(name: 'vector_stores', includeIfNull: false) List? get vectorStores; + + /// Create a copy of ToolResourcesFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesFileSearchImplCopyWith<_$ToolResourcesFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36281,12 +39282,22 @@ mixin _$ToolResourcesFileSearchVectorStore { @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this ToolResourcesFileSearchVectorStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ToolResourcesFileSearchVectorStoreCopyWith< ToolResourcesFileSearchVectorStore> get copyWith => throw _privateConstructorUsedError; @@ -36302,7 +39313,11 @@ abstract class $ToolResourcesFileSearchVectorStoreCopyWith<$Res> { @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -36316,10 +39331,13 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileIds = freezed, + Object? chunkingStrategy = freezed, Object? 
metadata = freezed, }) { return _then(_value.copyWith( @@ -36327,12 +39345,31 @@ class _$ToolResourcesFileSearchVectorStoreCopyWithImpl<$Res, ? _value.fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable as dynamic, ) as $Val); } + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc @@ -36346,7 +39383,12 @@ abstract class _$$ToolResourcesFileSearchVectorStoreImplCopyWith<$Res> @useResult $Res call( {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc @@ -36359,10 +39401,13 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> $Res Function(_$ToolResourcesFileSearchVectorStoreImpl) _then) : super(_value, _then); + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileIds = freezed, + Object? chunkingStrategy = freezed, Object? metadata = freezed, }) { return _then(_$ToolResourcesFileSearchVectorStoreImpl( @@ -36370,6 +39415,10 @@ class __$$ToolResourcesFileSearchVectorStoreImplCopyWithImpl<$Res> ? _value._fileIds : fileIds // ignore: cast_nullable_to_non_nullable as List?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, metadata: freezed == metadata ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable @@ -36385,6 +39434,8 @@ class _$ToolResourcesFileSearchVectorStoreImpl const _$ToolResourcesFileSearchVectorStoreImpl( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy, @JsonKey(includeIfNull: false) this.metadata}) : _fileIds = fileIds, super._(); @@ -36407,6 +39458,12 @@ class _$ToolResourcesFileSearchVectorStoreImpl return EqualUnmodifiableListView(value); } + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy; + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
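// A minimal sketch of supplying the new `chunking_strategy` field introduced
// above when describing a vector store for the `file_search` tool. Type and
// parameter names come from this diff; the element type of fileIds and the
// caller-provided strategy are assumptions.
ToolResourcesFileSearchVectorStore buildVectorStore(
  List<String> fileIds, {
  ChunkingStrategyRequestParam? chunkingStrategy,
}) {
  return ToolResourcesFileSearchVectorStore(
    fileIds: fileIds,
    // When left null the API falls back to the `auto` chunking strategy,
    // as noted in the doc comment above.
    chunkingStrategy: chunkingStrategy,
  );
}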
@override @JsonKey(includeIfNull: false) @@ -36414,7 +39471,7 @@ class _$ToolResourcesFileSearchVectorStoreImpl @override String toString() { - return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, metadata: $metadata)'; + return 'ToolResourcesFileSearchVectorStore(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; } @override @@ -36423,17 +39480,22 @@ class _$ToolResourcesFileSearchVectorStoreImpl (other.runtimeType == runtimeType && other is _$ToolResourcesFileSearchVectorStoreImpl && const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy) && const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_fileIds), + chunkingStrategy, const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ToolResourcesFileSearchVectorStoreImplCopyWith< @@ -36454,6 +39516,8 @@ abstract class _ToolResourcesFileSearchVectorStore const factory _ToolResourcesFileSearchVectorStore( {@JsonKey(name: 'file_ids', includeIfNull: false) final List? fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy, @JsonKey(includeIfNull: false) final dynamic metadata}) = _$ToolResourcesFileSearchVectorStoreImpl; const _ToolResourcesFileSearchVectorStore._() : super._(); @@ -36462,18 +39526,26 @@ abstract class _ToolResourcesFileSearchVectorStore Map json) = _$ToolResourcesFileSearchVectorStoreImpl.fromJson; - @override - /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + @override @JsonKey(name: 'file_ids', includeIfNull: false) List? get fileIds; + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) dynamic get metadata; + + /// Create a copy of ToolResourcesFileSearchVectorStore + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ToolResourcesFileSearchVectorStoreImplCopyWith< _$ToolResourcesFileSearchVectorStoreImpl> get copyWith => throw _privateConstructorUsedError; @@ -36494,8 +39566,12 @@ mixin _$DeleteThreadResponse { /// The object type, which is always `thread.deleted`. DeleteThreadResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteThreadResponse to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteThreadResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36520,6 +39596,8 @@ class _$DeleteThreadResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36563,6 +39641,8 @@ class __$$DeleteThreadResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteThreadResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36624,11 +39704,13 @@ class _$DeleteThreadResponseImpl extends _DeleteThreadResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> @@ -36655,20 +39737,22 @@ abstract class _DeleteThreadResponse extends DeleteThreadResponse { factory _DeleteThreadResponse.fromJson(Map json) = _$DeleteThreadResponseImpl.fromJson; - @override - /// The thread identifier. - String get id; @override + String get id; /// Whether the thread was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `thread.deleted`. + @override DeleteThreadResponseObject get object; + + /// Create a copy of DeleteThreadResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteThreadResponseImplCopyWith<_$DeleteThreadResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36697,8 +39781,12 @@ mixin _$ListThreadsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListThreadsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListThreadsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -36727,6 +39815,8 @@ class _$ListThreadsResponseCopyWithImpl<$Res, $Val extends ListThreadsResponse> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -36785,6 +39875,8 @@ class __$$ListThreadsResponseImplCopyWithImpl<$Res> $Res Function(_$ListThreadsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -36881,12 +39973,14 @@ class _$ListThreadsResponseImpl extends _ListThreadsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => @@ -36914,31 +40008,33 @@ abstract class _ListThreadsResponse extends ListThreadsResponse { factory _ListThreadsResponse.fromJson(Map json) = _$ListThreadsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of threads. - List get data; @override + List get data; /// The ID of the first thread in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last thread in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more threads to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListThreadsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListThreadsResponseImplCopyWith<_$ListThreadsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -36998,11 +40094,17 @@ mixin _$MessageObject { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this MessageObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37044,6 +40146,8 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37122,6 +40226,8 @@ class _$MessageObjectCopyWithImpl<$Res, $Val extends MessageObject> ) as $Val); } + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectIncompleteDetailsCopyWith<$Res>? 
get incompleteDetails { @@ -37174,6 +40280,8 @@ class __$$MessageObjectImplCopyWithImpl<$Res> _$MessageObjectImpl _value, $Res Function(_$MessageObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37356,10 +40464,14 @@ class _$MessageObjectImpl extends _MessageObject { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -37402,7 +40514,7 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -37421,7 +40533,9 @@ class _$MessageObjectImpl extends _MessageObject { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => @@ -37458,72 +40572,76 @@ abstract class _MessageObject extends MessageObject { factory _MessageObject.fromJson(Map json) = _$MessageObjectImpl.fromJson; - @override - /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.message`. - MessageObjectObject get object; @override + MessageObjectObject get object; /// The Unix timestamp (in seconds) for when the message was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + @override @JsonKey(unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageObjectStatus? get status; - @override /// On an incomplete message, details about why the message is incomplete. + @override @JsonKey(name: 'incomplete_details') MessageObjectIncompleteDetails? get incompleteDetails; - @override /// The Unix timestamp (in seconds) for when the message was completed. 
+ @override @JsonKey(name: 'completed_at') int? get completedAt; - @override /// The Unix timestamp (in seconds) for when the message was marked as incomplete. + @override @JsonKey(name: 'incomplete_at') int? get incompleteAt; - @override /// The entity that produced the message. One of `user` or `assistant`. - MessageRole get role; @override + MessageRole get role; /// The content of the message in array of text and/or images. - List get content; @override + List get content; /// If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message. + @override @JsonKey(name: 'assistant_id') String? get assistantId; - @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + @override @JsonKey(name: 'run_id') String? get runId; - @override /// A list of files attached to the message, and the tools they were added to. - List? get attachments; @override + List? get attachments; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override Map? get metadata; + + /// Create a copy of MessageObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageObjectImplCopyWith<_$MessageObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37539,8 +40657,12 @@ mixin _$MessageObjectIncompleteDetails { MessageObjectIncompleteDetailsReason get reason => throw _privateConstructorUsedError; + /// Serializes this MessageObjectIncompleteDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageObjectIncompleteDetailsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37567,6 +40689,8 @@ class _$MessageObjectIncompleteDetailsCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37603,6 +40727,8 @@ class __$$MessageObjectIncompleteDetailsImplCopyWithImpl<$Res> $Res Function(_$MessageObjectIncompleteDetailsImpl) _then) : super(_value, _then); + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -37645,11 +40771,13 @@ class _$MessageObjectIncompleteDetailsImpl (identical(other.reason, reason) || other.reason == reason)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, reason); - @JsonKey(ignore: true) + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageObjectIncompleteDetailsImplCopyWith< @@ -37675,12 +40803,14 @@ abstract class _MessageObjectIncompleteDetails factory _MessageObjectIncompleteDetails.fromJson(Map json) = _$MessageObjectIncompleteDetailsImpl.fromJson; - @override - /// The reason the message is incomplete. + @override MessageObjectIncompleteDetailsReason get reason; + + /// Create a copy of MessageObjectIncompleteDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageObjectIncompleteDetailsImplCopyWith< _$MessageObjectIncompleteDetailsImpl> get copyWith => throw _privateConstructorUsedError; @@ -37700,8 +40830,12 @@ mixin _$MessageAttachment { @JsonKey(includeIfNull: false) List? get tools => throw _privateConstructorUsedError; + /// Serializes this MessageAttachment to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageAttachmentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37727,6 +40861,8 @@ class _$MessageAttachmentCopyWithImpl<$Res, $Val extends MessageAttachment> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37767,6 +40903,8 @@ class __$$MessageAttachmentImplCopyWithImpl<$Res> $Res Function(_$MessageAttachmentImpl) _then) : super(_value, _then); + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37831,12 +40969,14 @@ class _$MessageAttachmentImpl extends _MessageAttachment { const DeepCollectionEquality().equals(other._tools, _tools)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, fileId, const DeepCollectionEquality().hash(_tools)); - @JsonKey(ignore: true) + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => @@ -37861,18 +41001,20 @@ abstract class _MessageAttachment extends MessageAttachment { factory _MessageAttachment.fromJson(Map json) = _$MessageAttachmentImpl.fromJson; - @override - /// The ID of the file to attach to the message. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - @override /// The tools to add this file to. + @override @JsonKey(includeIfNull: false) List? 
get tools; + + /// Create a copy of MessageAttachment + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageAttachmentImplCopyWith<_$MessageAttachmentImpl> get copyWith => throw _privateConstructorUsedError; } @@ -37892,8 +41034,12 @@ mixin _$MessageDeltaObject { /// The delta containing the fields that have changed on the Message. MessageDelta get delta => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -37919,6 +41065,8 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -37942,6 +41090,8 @@ class _$MessageDeltaObjectCopyWithImpl<$Res, $Val extends MessageDeltaObject> ) as $Val); } + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaCopyWith<$Res> get delta { @@ -37973,6 +41123,8 @@ class __$$MessageDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38034,11 +41186,13 @@ class _$MessageDeltaObjectImpl extends _MessageDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => @@ -38063,20 +41217,22 @@ abstract class _MessageDeltaObject extends MessageDeltaObject { factory _MessageDeltaObject.fromJson(Map json) = _$MessageDeltaObjectImpl.fromJson; - @override - /// The identifier of the message, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.message.delta`. - MessageDeltaObjectObject get object; @override + MessageDeltaObjectObject get object; /// The delta containing the fields that have changed on the Message. + @override MessageDelta get delta; + + /// Create a copy of MessageDeltaObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaObjectImplCopyWith<_$MessageDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38096,8 +41252,12 @@ mixin _$MessageDelta { @JsonKey(includeIfNull: false) List? get content => throw _privateConstructorUsedError; + /// Serializes this MessageDelta to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38126,6 +41286,8 @@ class _$MessageDeltaCopyWithImpl<$Res, $Val extends MessageDelta> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38169,6 +41331,8 @@ class __$$MessageDeltaImplCopyWithImpl<$Res> _$MessageDeltaImpl _value, $Res Function(_$MessageDeltaImpl) _then) : super(_value, _then); + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38237,12 +41401,14 @@ class _$MessageDeltaImpl extends _MessageDelta { const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, role, const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => @@ -38269,19 +41435,21 @@ abstract class _MessageDelta extends MessageDelta { factory _MessageDelta.fromJson(Map json) = _$MessageDeltaImpl.fromJson; - @override - /// The entity that produced the message. One of `user` or `assistant`. + @override @JsonKey( includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) MessageRole? get role; - @override /// The content of the message in array of text and/or images. + @override @JsonKey(includeIfNull: false) List? get content; + + /// Create a copy of MessageDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaImplCopyWith<_$MessageDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38304,12 +41472,18 @@ mixin _$CreateMessageRequest { List? get attachments => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this CreateMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $CreateMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -38341,6 +41515,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38369,6 +41545,8 @@ class _$CreateMessageRequestCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $CreateMessageRequestContentCopyWith<$Res> get content { @@ -38405,6 +41583,8 @@ class __$$CreateMessageRequestImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38472,10 +41652,14 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { return EqualUnmodifiableListView(value); } - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -38503,7 +41687,7 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -38512,7 +41696,9 @@ class _$CreateMessageRequestImpl extends _CreateMessageRequest { const DeepCollectionEquality().hash(_attachments), const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> @@ -38541,27 +41727,31 @@ abstract class _CreateMessageRequest extends CreateMessageRequest { factory _CreateMessageRequest.fromJson(Map json) = _$CreateMessageRequestImpl.fromJson; - @override - /// The entity that produced the message. One of `user` or `assistant`. - MessageRole get role; @override + MessageRole get role; /// The content of the message. 
+ @override @_CreateMessageRequestContentConverter() CreateMessageRequestContent get content; - @override /// A list of files attached to the message, and the tools they were added to. + @override @JsonKey(includeIfNull: false) List? get attachments; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of CreateMessageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestImplCopyWith<_$CreateMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -38628,6 +41818,8 @@ mixin _$CreateMessageRequestContent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this CreateMessageRequestContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -38650,6 +41842,9 @@ class _$CreateMessageRequestContentCopyWithImpl<$Res, final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -38675,6 +41870,8 @@ class __$$CreateMessageRequestContentListMessageContentImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentListMessageContentImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -38728,12 +41925,14 @@ class _$CreateMessageRequestContentListMessageContentImpl const DeepCollectionEquality().equals(other._value, _value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentListMessageContentImplCopyWith< @@ -38830,7 +42029,10 @@ abstract class CreateMessageRequestContentListMessageContent @override List get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestContentListMessageContentImplCopyWith< _$CreateMessageRequestContentListMessageContentImpl> get copyWith => throw _privateConstructorUsedError; @@ -38856,6 +42058,8 @@ class __$$CreateMessageRequestContentStringImplCopyWithImpl<$Res> $Res Function(_$CreateMessageRequestContentStringImpl) _then) : super(_value, _then); + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -38902,11 +42106,13 @@ class _$CreateMessageRequestContentStringImpl (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, value); - @JsonKey(ignore: true) + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$CreateMessageRequestContentStringImplCopyWith< @@ -39000,7 +42206,10 @@ abstract class CreateMessageRequestContentString @override String get value; - @JsonKey(ignore: true) + + /// Create a copy of CreateMessageRequestContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$CreateMessageRequestContentStringImplCopyWith< _$CreateMessageRequestContentStringImpl> get copyWith => throw _privateConstructorUsedError; @@ -39012,12 +42221,18 @@ ModifyMessageRequest _$ModifyMessageRequestFromJson(Map json) { /// @nodoc mixin _$ModifyMessageRequest { - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// Serializes this ModifyMessageRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ModifyMessageRequestCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39042,6 +42257,8 @@ class _$ModifyMessageRequestCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39075,6 +42292,8 @@ class __$$ModifyMessageRequestImplCopyWithImpl<$Res> $Res Function(_$ModifyMessageRequestImpl) _then) : super(_value, _then); + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39100,10 +42319,14 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { factory _$ModifyMessageRequestImpl.fromJson(Map json) => _$$ModifyMessageRequestImplFromJson(json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? 
_metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override @JsonKey(includeIfNull: false) Map? get metadata { @@ -39127,12 +42350,14 @@ class _$ModifyMessageRequestImpl extends _ModifyMessageRequest { const DeepCollectionEquality().equals(other._metadata, _metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_metadata)); - @JsonKey(ignore: true) + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> @@ -39157,13 +42382,17 @@ abstract class _ModifyMessageRequest extends ModifyMessageRequest { factory _ModifyMessageRequest.fromJson(Map json) = _$ModifyMessageRequestImpl.fromJson; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? get metadata; + + /// Create a copy of ModifyMessageRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ModifyMessageRequestImplCopyWith<_$ModifyMessageRequestImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39184,8 +42413,12 @@ mixin _$DeleteMessageResponse { /// The object type, which is always `thread.message.deleted`. DeleteMessageResponseObject get object => throw _privateConstructorUsedError; + /// Serializes this DeleteMessageResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DeleteMessageResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39210,6 +42443,8 @@ class _$DeleteMessageResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39255,6 +42490,8 @@ class __$$DeleteMessageResponseImplCopyWithImpl<$Res> $Res Function(_$DeleteMessageResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -39316,11 +42553,13 @@ class _$DeleteMessageResponseImpl extends _DeleteMessageResponse { (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> @@ -39346,20 +42585,22 @@ abstract class _DeleteMessageResponse extends DeleteMessageResponse { factory _DeleteMessageResponse.fromJson(Map json) = _$DeleteMessageResponseImpl.fromJson; - @override - /// The message identifier. - String get id; @override + String get id; /// Whether the message was deleted. - bool get deleted; @override + bool get deleted; /// The object type, which is always `thread.message.deleted`. + @override DeleteMessageResponseObject get object; + + /// Create a copy of DeleteMessageResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DeleteMessageResponseImplCopyWith<_$DeleteMessageResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39388,8 +42629,12 @@ mixin _$ListMessagesResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListMessagesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListMessagesResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39419,6 +42664,8 @@ class _$ListMessagesResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39477,6 +42724,8 @@ class __$$ListMessagesResponseImplCopyWithImpl<$Res> $Res Function(_$ListMessagesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39573,12 +42822,14 @@ class _$ListMessagesResponseImpl extends _ListMessagesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> @@ -39607,31 +42858,33 @@ abstract class _ListMessagesResponse extends ListMessagesResponse { factory _ListMessagesResponse.fromJson(Map json) = _$ListMessagesResponseImpl.fromJson; - @override - /// The object type, which is always `list`. 
- String get object; @override + String get object; /// The list of messages. - List get data; @override + List get data; /// The ID of the first message in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last message in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more messages to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListMessagesResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListMessagesResponseImplCopyWith<_$ListMessagesResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39650,8 +42903,12 @@ mixin _$MessageContentImageFile { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. MessageContentImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this MessageContentImageFile to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentImageFileCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39678,6 +42935,8 @@ class _$MessageContentImageFileCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39721,6 +42980,8 @@ class __$$MessageContentImageFileImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageFileImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39775,11 +43036,13 @@ class _$MessageContentImageFileImpl extends _MessageContentImageFile { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, detail); - @JsonKey(ignore: true) + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> @@ -39803,17 +43066,19 @@ abstract class _MessageContentImageFile extends MessageContentImageFile { factory _MessageContentImageFile.fromJson(Map json) = _$MessageContentImageFileImpl.fromJson; - @override - /// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + @override @JsonKey(name: 'file_id') String get fileId; - @override /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + @override MessageContentImageDetail get detail; + + /// Create a copy of MessageContentImageFile + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageFileImplCopyWith<_$MessageContentImageFileImpl> get copyWith => throw _privateConstructorUsedError; } @@ -39831,8 +43096,12 @@ mixin _$MessageContentImageUrl { /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. MessageContentImageDetail get detail => throw _privateConstructorUsedError; + /// Serializes this MessageContentImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentImageUrlCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -39857,6 +43126,8 @@ class _$MessageContentImageUrlCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39898,6 +43169,8 @@ class __$$MessageContentImageUrlImplCopyWithImpl<$Res> $Res Function(_$MessageContentImageUrlImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -39950,11 +43223,13 @@ class _$MessageContentImageUrlImpl extends _MessageContentImageUrl { (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, url, detail); - @JsonKey(ignore: true) + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> @@ -39978,16 +43253,18 @@ abstract class _MessageContentImageUrl extends MessageContentImageUrl { factory _MessageContentImageUrl.fromJson(Map json) = _$MessageContentImageUrlImpl.fromJson; - @override - /// The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp. - String get url; @override + String get url; /// Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + @override MessageContentImageDetail get detail; + + /// Create a copy of MessageContentImageUrl + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentImageUrlImplCopyWith<_$MessageContentImageUrlImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40005,8 +43282,12 @@ mixin _$MessageRequestContentTextObject { /// Text content to be sent to the model String get text => throw _privateConstructorUsedError; + /// Serializes this MessageRequestContentTextObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $MessageRequestContentTextObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40033,6 +43314,8 @@ class _$MessageRequestContentTextObjectCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40074,6 +43357,8 @@ class __$$MessageRequestContentTextObjectImplCopyWithImpl<$Res> $Res Function(_$MessageRequestContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40127,11 +43412,13 @@ class _$MessageRequestContentTextObjectImpl (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageRequestContentTextObjectImplCopyWith< @@ -40157,16 +43444,18 @@ abstract class _MessageRequestContentTextObject factory _MessageRequestContentTextObject.fromJson(Map json) = _$MessageRequestContentTextObjectImpl.fromJson; - @override - /// Always `text`. - String get type; @override + String get type; /// Text content to be sent to the model + @override String get text; + + /// Create a copy of MessageRequestContentTextObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageRequestContentTextObjectImplCopyWith< _$MessageRequestContentTextObjectImpl> get copyWith => throw _privateConstructorUsedError; @@ -40186,8 +43475,12 @@ mixin _$MessageContentText { List? get annotations => throw _privateConstructorUsedError; + /// Serializes this MessageContentText to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40214,6 +43507,8 @@ class _$MessageContentTextCopyWithImpl<$Res, $Val extends MessageContentText> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40255,6 +43550,8 @@ class __$$MessageContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -40320,12 +43617,14 @@ class _$MessageContentTextImpl extends _MessageContentText { .equals(other._annotations, _annotations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - @JsonKey(ignore: true) + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => @@ -40351,17 +43650,19 @@ abstract class _MessageContentText extends MessageContentText { factory _MessageContentText.fromJson(Map json) = _$MessageContentTextImpl.fromJson; - @override - /// The data that makes up the text. - String get value; @override + String get value; /// A list of annotations that point to specific quotes from specific files. + @override @JsonKey(includeIfNull: false) List? get annotations; + + /// Create a copy of MessageContentText + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextImplCopyWith<_$MessageContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -40378,11 +43679,12 @@ mixin _$MessageContentTextAnnotationsFileCitation { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; - /// The specific quote in the file. - String get quote => throw _privateConstructorUsedError; - + /// Serializes this MessageContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageContentTextAnnotationsFileCitationCopyWith< MessageContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -40396,7 +43698,7 @@ abstract class $MessageContentTextAnnotationsFileCitationCopyWith<$Res> { _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, MessageContentTextAnnotationsFileCitation>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc @@ -40411,21 +43713,18 @@ class _$MessageContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileId = null, - Object? quote = null, }) { return _then(_value.copyWith( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: null == quote - ? 
_value.quote - : quote // ignore: cast_nullable_to_non_nullable - as String, ) as $Val); } } @@ -40439,7 +43738,7 @@ abstract class _$$MessageContentTextAnnotationsFileCitationImplCopyWith<$Res> __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId, String quote}); + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc @@ -40452,21 +43751,18 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? fileId = null, - Object? quote = null, }) { return _then(_$MessageContentTextAnnotationsFileCitationImpl( fileId: null == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable as String, - quote: null == quote - ? _value.quote - : quote // ignore: cast_nullable_to_non_nullable - as String, )); } } @@ -40476,7 +43772,7 @@ class __$$MessageContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> class _$MessageContentTextAnnotationsFileCitationImpl extends _MessageContentTextAnnotationsFileCitation { const _$MessageContentTextAnnotationsFileCitationImpl( - {@JsonKey(name: 'file_id') required this.fileId, required this.quote}) + {@JsonKey(name: 'file_id') required this.fileId}) : super._(); factory _$MessageContentTextAnnotationsFileCitationImpl.fromJson( @@ -40488,13 +43784,9 @@ class _$MessageContentTextAnnotationsFileCitationImpl @JsonKey(name: 'file_id') final String fileId; - /// The specific quote in the file. - @override - final String quote; - @override String toString() { - return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId, quote: $quote)'; + return 'MessageContentTextAnnotationsFileCitation(fileId: $fileId)'; } @override @@ -40502,15 +43794,16 @@ class _$MessageContentTextAnnotationsFileCitationImpl return identical(this, other) || (other.runtimeType == runtimeType && other is _$MessageContentTextAnnotationsFileCitationImpl && - (identical(other.fileId, fileId) || other.fileId == fileId) && - (identical(other.quote, quote) || other.quote == quote)); + (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, fileId, quote); + int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageContentTextAnnotationsFileCitationImplCopyWith< @@ -40531,8 +43824,7 @@ class _$MessageContentTextAnnotationsFileCitationImpl abstract class _MessageContentTextAnnotationsFileCitation extends MessageContentTextAnnotationsFileCitation { const factory _MessageContentTextAnnotationsFileCitation( - {@JsonKey(name: 'file_id') required final String fileId, - required final String quote}) = + {@JsonKey(name: 'file_id') required final String fileId}) = _$MessageContentTextAnnotationsFileCitationImpl; const _MessageContentTextAnnotationsFileCitation._() : super._(); @@ -40540,265 +43832,20 @@ abstract class _MessageContentTextAnnotationsFileCitation Map json) = _$MessageContentTextAnnotationsFileCitationImpl.fromJson; - @override - /// The ID of the specific File the citation is from. + @override @JsonKey(name: 'file_id') String get fileId; - @override - /// The specific quote in the file. - String get quote; + /// Create a copy of MessageContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageContentTextAnnotationsFileCitationImplCopyWith< _$MessageContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContentImageUrlObject _$MessageDeltaContentImageUrlObjectFromJson( - Map json) { - return _MessageDeltaContentImageUrlObject.fromJson(json); -} - -/// @nodoc -mixin _$MessageDeltaContentImageUrlObject { - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) - int? get index => throw _privateConstructorUsedError; - - /// Always `image_url`. - @JsonKey(includeIfNull: false) - String? get type => throw _privateConstructorUsedError; - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageDeltaContentImageUrlObjectCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - factory $MessageDeltaContentImageUrlObjectCopyWith( - MessageDeltaContentImageUrlObject value, - $Res Function(MessageDeltaContentImageUrlObject) then) = - _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - MessageDeltaContentImageUrlObject>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) int? index, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); - - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; -} - -/// @nodoc -class _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - $Val extends MessageDeltaContentImageUrlObject> - implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - _$MessageDeltaContentImageUrlObjectCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = freezed, - Object? type = freezed, - Object? imageUrl = freezed, - }) { - return _then(_value.copyWith( - index: freezed == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int?, - type: freezed == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, - ) as $Val); - } - - @override - @pragma('vm:prefer-inline') - $MessageContentImageUrlCopyWith<$Res>? get imageUrl { - if (_value.imageUrl == null) { - return null; - } - - return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { - return _then(_value.copyWith(imageUrl: value) as $Val); - }); - } -} - -/// @nodoc -abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> - implements $MessageDeltaContentImageUrlObjectCopyWith<$Res> { - factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( - _$MessageDeltaContentImageUrlObjectImpl value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) int? index, - @JsonKey(includeIfNull: false) String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? imageUrl}); - - @override - $MessageContentImageUrlCopyWith<$Res>? get imageUrl; -} - -/// @nodoc -class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentImageUrlObjectCopyWithImpl<$Res, - _$MessageDeltaContentImageUrlObjectImpl> - implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { - __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( - _$MessageDeltaContentImageUrlObjectImpl _value, - $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? index = freezed, - Object? type = freezed, - Object? imageUrl = freezed, - }) { - return _then(_$MessageDeltaContentImageUrlObjectImpl( - index: freezed == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int?, - type: freezed == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String?, - imageUrl: freezed == imageUrl - ? _value.imageUrl - : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$MessageDeltaContentImageUrlObjectImpl - extends _MessageDeltaContentImageUrlObject { - const _$MessageDeltaContentImageUrlObjectImpl( - {@JsonKey(includeIfNull: false) this.index, - @JsonKey(includeIfNull: false) this.type, - @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) - : super._(); - - factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( - Map json) => - _$$MessageDeltaContentImageUrlObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - @JsonKey(includeIfNull: false) - final int? index; - - /// Always `image_url`. - @override - @JsonKey(includeIfNull: false) - final String? type; - - /// The image URL part of a message. - @override - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? 
imageUrl; - - @override - String toString() { - return 'MessageDeltaContentImageUrlObject(index: $index, type: $type, imageUrl: $imageUrl)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$MessageDeltaContentImageUrlObjectImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type) && - (identical(other.imageUrl, imageUrl) || - other.imageUrl == imageUrl)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, index, type, imageUrl); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< - _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); - - @override - Map toJson() { - return _$$MessageDeltaContentImageUrlObjectImplToJson( - this, - ); - } -} - -abstract class _MessageDeltaContentImageUrlObject - extends MessageDeltaContentImageUrlObject { - const factory _MessageDeltaContentImageUrlObject( - {@JsonKey(includeIfNull: false) final int? index, - @JsonKey(includeIfNull: false) final String? type, - @JsonKey(name: 'image_url', includeIfNull: false) - final MessageContentImageUrl? imageUrl}) = - _$MessageDeltaContentImageUrlObjectImpl; - const _MessageDeltaContentImageUrlObject._() : super._(); - - factory _MessageDeltaContentImageUrlObject.fromJson( - Map json) = - _$MessageDeltaContentImageUrlObjectImpl.fromJson; - - @override - - /// The index of the content part in the message. - @JsonKey(includeIfNull: false) - int? get index; - @override - - /// Always `image_url`. - @JsonKey(includeIfNull: false) - String? get type; - @override - - /// The image URL part of a message. - @JsonKey(name: 'image_url', includeIfNull: false) - MessageContentImageUrl? get imageUrl; - @override - @JsonKey(ignore: true) - _$$MessageDeltaContentImageUrlObjectImplCopyWith< - _$MessageDeltaContentImageUrlObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - MessageDeltaContentText _$MessageDeltaContentTextFromJson( Map json) { return _MessageDeltaContentText.fromJson(json); @@ -40815,8 +43862,12 @@ mixin _$MessageDeltaContentText { List? get annotations => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentText to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -40844,6 +43895,8 @@ class _$MessageDeltaContentTextCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -40888,6 +43941,8 @@ class __$$MessageDeltaContentTextImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -40954,12 +44009,14 @@ class _$MessageDeltaContentTextImpl extends _MessageDeltaContentText { .equals(other._annotations, _annotations)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, value, const DeepCollectionEquality().hash(_annotations)); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> @@ -40985,18 +44042,20 @@ abstract class _MessageDeltaContentText extends MessageDeltaContentText { factory _MessageDeltaContentText.fromJson(Map json) = _$MessageDeltaContentTextImpl.fromJson; - @override - /// The data that makes up the text. + @override @JsonKey(includeIfNull: false) String? get value; - @override /// A list of annotations that point to specific quotes from specific files. + @override @JsonKey(includeIfNull: false) List? get annotations; + + /// Create a copy of MessageDeltaContentText + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextImplCopyWith<_$MessageDeltaContentTextImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41017,8 +44076,12 @@ mixin _$MessageDeltaContentTextAnnotationsFileCitation { @JsonKey(includeIfNull: false) String? get quote => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentTextAnnotationsFileCitation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $MessageDeltaContentTextAnnotationsFileCitationCopyWith< MessageDeltaContentTextAnnotationsFileCitation> get copyWith => throw _privateConstructorUsedError; @@ -41049,6 +44112,8 @@ class _$MessageDeltaContentTextAnnotationsFileCitationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41095,6 +44160,8 @@ class __$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWithImpl<$Res> $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41151,11 +44218,13 @@ class _$MessageDeltaContentTextAnnotationsFileCitationImpl (identical(other.quote, quote) || other.quote == quote)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId, quote); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< @@ -41185,18 +44254,20 @@ abstract class _MessageDeltaContentTextAnnotationsFileCitation Map json) = _$MessageDeltaContentTextAnnotationsFileCitationImpl.fromJson; - @override - /// The ID of the specific File the citation is from. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; - @override /// The specific quote in the file. + @override @JsonKey(includeIfNull: false) String? get quote; + + /// Create a copy of MessageDeltaContentTextAnnotationsFileCitation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageDeltaContentTextAnnotationsFileCitationImplCopyWith< _$MessageDeltaContentTextAnnotationsFileCitationImpl> get copyWith => throw _privateConstructorUsedError; @@ -41261,14 +44332,20 @@ mixin _$RunStepObject { @JsonKey(name: 'completed_at') int? get completedAt => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. Map? get metadata => throw _privateConstructorUsedError; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. RunStepCompletionUsage? get usage => throw _privateConstructorUsedError; + /// Serializes this RunStepObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41312,6 +44389,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41400,6 +44479,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> ) as $Val); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDetailsCopyWith<$Res> get stepDetails { @@ -41408,6 +44489,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepLastErrorCopyWith<$Res>? get lastError { @@ -41420,6 +44503,8 @@ class _$RunStepObjectCopyWithImpl<$Res, $Val extends RunStepObject> }); } + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepCompletionUsageCopyWith<$Res>? 
get usage { @@ -41475,6 +44560,8 @@ class __$$RunStepObjectImplCopyWithImpl<$Res> _$RunStepObjectImpl _value, $Res Function(_$RunStepObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41657,10 +44744,14 @@ class _$RunStepObjectImpl extends _RunStepObject { @JsonKey(name: 'completed_at') final int? completedAt; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. final Map? _metadata; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override Map? get metadata { final value = _metadata; @@ -41711,7 +44802,7 @@ class _$RunStepObjectImpl extends _RunStepObject { (identical(other.usage, usage) || other.usage == usage)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -41732,7 +44823,9 @@ class _$RunStepObjectImpl extends _RunStepObject { const DeepCollectionEquality().hash(_metadata), usage); - @JsonKey(ignore: true) + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => @@ -41769,83 +44862,87 @@ abstract class _RunStepObject extends RunStepObject { factory _RunStepObject.fromJson(Map json) = _$RunStepObjectImpl.fromJson; - @override - /// The identifier of the run step, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run.step`. - RunStepObjectObject get object; @override + RunStepObjectObject get object; /// The Unix timestamp (in seconds) for when the run step was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step. + @override @JsonKey(name: 'assistant_id') String get assistantId; - @override /// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run. + @override @JsonKey(name: 'thread_id') String get threadId; - @override /// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of. + @override @JsonKey(name: 'run_id') String get runId; - @override /// The type of run step, which can be either `message_creation` or `tool_calls`. 
- RunStepType get type; @override + RunStepType get type; /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. - RunStepStatus get status; @override + RunStepStatus get status; /// The details of the run step. /// Any of: [RunStepDetailsMessageCreationObject], [RunStepDetailsToolCallsObject] + @override @JsonKey(name: 'step_details') RunStepDetails get stepDetails; - @override /// The last error associated with this run step. Will be `null` if there are no errors. + @override @JsonKey(name: 'last_error') RunStepLastError? get lastError; - @override /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + @override @JsonKey(name: 'expired_at') int? get expiredAt; - @override /// The Unix timestamp (in seconds) for when the run step was cancelled. + @override @JsonKey(name: 'cancelled_at') int? get cancelledAt; - @override /// The Unix timestamp (in seconds) for when the run step failed. + @override @JsonKey(name: 'failed_at') int? get failedAt; - @override /// The Unix timestamp (in seconds) for when the run step completed. + @override @JsonKey(name: 'completed_at') int? get completedAt; - @override - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - Map? get metadata; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + Map? get metadata; /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + @override RunStepCompletionUsage? get usage; + + /// Create a copy of RunStepObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepObjectImplCopyWith<_$RunStepObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -41862,8 +44959,12 @@ mixin _$RunStepLastError { /// A human-readable description of the error. String get message => throw _privateConstructorUsedError; + /// Serializes this RunStepLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -41887,6 +44988,8 @@ class _$RunStepLastErrorCopyWithImpl<$Res, $Val extends RunStepLastError> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -41925,6 +45028,8 @@ class __$$RunStepLastErrorImplCopyWithImpl<$Res> $Res Function(_$RunStepLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -41975,11 +45080,13 @@ class _$RunStepLastErrorImpl extends _RunStepLastError { (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => @@ -42003,16 +45110,18 @@ abstract class _RunStepLastError extends RunStepLastError { factory _RunStepLastError.fromJson(Map json) = _$RunStepLastErrorImpl.fromJson; - @override - /// One of `server_error` or `rate_limit_exceeded`. - RunStepLastErrorCode get code; @override + RunStepLastErrorCode get code; /// A human-readable description of the error. + @override String get message; + + /// Create a copy of RunStepLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepLastErrorImplCopyWith<_$RunStepLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42032,8 +45141,12 @@ mixin _$RunStepDeltaObject { /// The delta containing the fields that have changed on the run step. RunStepDelta get delta => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaObjectCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42059,6 +45172,8 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42082,6 +45197,8 @@ class _$RunStepDeltaObjectCopyWithImpl<$Res, $Val extends RunStepDeltaObject> ) as $Val); } + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaCopyWith<$Res> get delta { @@ -42113,6 +45230,8 @@ class __$$RunStepDeltaObjectImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42174,11 +45293,13 @@ class _$RunStepDeltaObjectImpl extends _RunStepDeltaObject { (identical(other.delta, delta) || other.delta == delta)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, object, delta); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => @@ -42203,20 +45324,22 @@ abstract class _RunStepDeltaObject extends RunStepDeltaObject { factory _RunStepDeltaObject.fromJson(Map json) = _$RunStepDeltaObjectImpl.fromJson; - @override - /// The identifier of the run step, which can be referenced in API endpoints. - String get id; @override + String get id; /// The object type, which is always `thread.run.step.delta`. - RunStepDeltaObjectObject get object; @override + RunStepDeltaObjectObject get object; /// The delta containing the fields that have changed on the run step. + @override RunStepDelta get delta; + + /// Create a copy of RunStepDeltaObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaObjectImplCopyWith<_$RunStepDeltaObjectImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42232,8 +45355,12 @@ mixin _$RunStepDelta { @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? get stepDetails => throw _privateConstructorUsedError; + /// Serializes this RunStepDelta to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42261,6 +45388,8 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42274,6 +45403,8 @@ class _$RunStepDeltaCopyWithImpl<$Res, $Val extends RunStepDelta> ) as $Val); } + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaDetailsCopyWith<$Res>? get stepDetails { @@ -42311,6 +45442,8 @@ class __$$RunStepDeltaImplCopyWithImpl<$Res> _$RunStepDeltaImpl _value, $Res Function(_$RunStepDeltaImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42355,11 +45488,13 @@ class _$RunStepDeltaImpl extends _RunStepDelta { other.stepDetails == stepDetails)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, stepDetails); - @JsonKey(ignore: true) + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => @@ -42382,14 +45517,16 @@ abstract class _RunStepDelta extends RunStepDelta { factory _RunStepDelta.fromJson(Map json) = _$RunStepDeltaImpl.fromJson; - @override - /// The details of the run step /// Any of: [RunStepDeltaStepDetailsMessageCreationObject], [RunStepDeltaStepDetailsToolCallsObject] + @override @JsonKey(name: 'step_details', includeIfNull: false) RunStepDeltaDetails? 
get stepDetails; + + /// Create a copy of RunStepDelta + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaImplCopyWith<_$RunStepDeltaImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42418,8 +45555,12 @@ mixin _$ListRunStepsResponse { @JsonKey(name: 'has_more') bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListRunStepsResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ListRunStepsResponseCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42449,6 +45590,8 @@ class _$ListRunStepsResponseCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42507,6 +45650,8 @@ class __$$ListRunStepsResponseImplCopyWithImpl<$Res> $Res Function(_$ListRunStepsResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42603,12 +45748,14 @@ class _$ListRunStepsResponseImpl extends _ListRunStepsResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> @@ -42637,31 +45784,33 @@ abstract class _ListRunStepsResponse extends ListRunStepsResponse { factory _ListRunStepsResponse.fromJson(Map json) = _$ListRunStepsResponseImpl.fromJson; - @override - /// The object type, which is always `list`. - String get object; @override + String get object; /// The list of run steps. - List get data; @override + List get data; /// The ID of the first run step in the list. + @override @JsonKey(name: 'first_id') String get firstId; - @override /// The ID of the last run step in the list. + @override @JsonKey(name: 'last_id') String get lastId; - @override /// Whether there are more run steps to retrieve. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListRunStepsResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ListRunStepsResponseImplCopyWith<_$ListRunStepsResponseImpl> get copyWith => throw _privateConstructorUsedError; } @@ -42677,8 +45826,12 @@ mixin _$RunStepDetailsMessageCreation { @JsonKey(name: 'message_id') String get messageId => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsMessageCreation to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsMessageCreationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -42705,6 +45858,8 @@ class _$RunStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42741,6 +45896,8 @@ class __$$RunStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsMessageCreationImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42786,11 +45943,13 @@ class _$RunStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, messageId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsMessageCreationImplCopyWith< @@ -42816,13 +45975,15 @@ abstract class _RunStepDetailsMessageCreation factory _RunStepDetailsMessageCreation.fromJson(Map json) = _$RunStepDetailsMessageCreationImpl.fromJson; - @override - /// The ID of the message that was created by this run step. + @override @JsonKey(name: 'message_id') String get messageId; + + /// Create a copy of RunStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsMessageCreationImplCopyWith< _$RunStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -42840,8 +46001,12 @@ mixin _$RunStepDeltaStepDetailsMessageCreation { @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsMessageCreation to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsMessageCreationCopyWith< RunStepDeltaStepDetailsMessageCreation> get copyWith => throw _privateConstructorUsedError; @@ -42870,6 +46035,8 @@ class _$RunStepDeltaStepDetailsMessageCreationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -42907,6 +46074,8 @@ class __$$RunStepDeltaStepDetailsMessageCreationImplCopyWithImpl<$Res> $Res Function(_$RunStepDeltaStepDetailsMessageCreationImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -42952,11 +46121,13 @@ class _$RunStepDeltaStepDetailsMessageCreationImpl other.messageId == messageId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, messageId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< @@ -42984,13 +46155,15 @@ abstract class _RunStepDeltaStepDetailsMessageCreation Map json) = _$RunStepDeltaStepDetailsMessageCreationImpl.fromJson; - @override - /// The ID of the message that was created by this run step. + @override @JsonKey(name: 'message_id', includeIfNull: false) String? get messageId; + + /// Create a copy of RunStepDeltaStepDetailsMessageCreation + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsMessageCreationImplCopyWith< _$RunStepDeltaStepDetailsMessageCreationImpl> get copyWith => throw _privateConstructorUsedError; @@ -43011,8 +46184,12 @@ mixin _$RunStepDetailsToolCallsCodeObjectCodeInterpreter { List get outputs => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -43042,6 +46219,8 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43088,6 +46267,8 @@ class __$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43150,12 +46331,14 @@ class _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -43185,16 +46368,18 @@ abstract class _RunStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - @override - /// The input to the Code Interpreter tool call. - String get input; @override + String get input; /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + @override List get outputs; + + /// Create a copy of RunStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -43218,8 +46403,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter { List? get outputs => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter> get copyWith => throw _privateConstructorUsedError; @@ -43257,6 +46446,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWithImpl< // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43312,6 +46503,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWithI _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43382,12 +46575,14 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl const DeepCollectionEquality().equals(other._outputs, _outputs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, input, const DeepCollectionEquality().hash(_outputs)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< @@ -43419,18 +46614,20 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter Map json) = _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl.fromJson; - @override - /// The input to the Code Interpreter tool call. + @override @JsonKey(includeIfNull: false) String? 
get input; - @override /// The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + @override @JsonKey(includeIfNull: false) List? get outputs; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; @@ -43448,8 +46645,12 @@ mixin _$RunStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id') String get fileId => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -43477,6 +46678,8 @@ class _$RunStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43513,6 +46716,8 @@ class __$$RunStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> $Res Function(_$RunStepDetailsToolCallsCodeOutputImageImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43557,11 +46762,13 @@ class _$RunStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -43589,13 +46796,15 @@ abstract class _RunStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDetailsToolCallsCodeOutputImageImpl.fromJson; - @override - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. + @override @JsonKey(name: 'file_id') String get fileId; + + /// Create a copy of RunStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; @@ -43613,8 +46822,12 @@ mixin _$RunStepDeltaStepDetailsToolCallsCodeOutputImage { @JsonKey(name: 'file_id', includeIfNull: false) String? 
get fileId => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutputImage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith< RunStepDeltaStepDetailsToolCallsCodeOutputImage> get copyWith => throw _privateConstructorUsedError; @@ -43643,6 +46856,8 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43683,6 +46898,8 @@ class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWithImpl<$Res> _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -43727,11 +46944,13 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< @@ -43761,2962 +46980,2940 @@ abstract class _RunStepDeltaStepDetailsToolCallsCodeOutputImage Map json) = _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl.fromJson; - @override - /// The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. + @override @JsonKey(name: 'file_id', includeIfNull: false) String? get fileId; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutputImage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageImplCopyWith< _$RunStepDeltaStepDetailsToolCallsCodeOutputImageImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepCompletionUsage _$RunStepCompletionUsageFromJson( +RunStepDetailsToolCallsFileSearch _$RunStepDetailsToolCallsFileSearchFromJson( Map json) { - return _RunStepCompletionUsage.fromJson(json); + return _RunStepDetailsToolCallsFileSearch.fromJson(json); } /// @nodoc -mixin _$RunStepCompletionUsage { - /// Number of completion tokens used over the course of the run step. - @JsonKey(name: 'completion_tokens') - int get completionTokens => throw _privateConstructorUsedError; - - /// Number of prompt tokens used over the course of the run step. - @JsonKey(name: 'prompt_tokens') - int get promptTokens => throw _privateConstructorUsedError; +mixin _$RunStepDetailsToolCallsFileSearch { + /// The ranking options for the file search. + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? 
get rankingOptions => + throw _privateConstructorUsedError; - /// Total number of tokens used (prompt + completion). - @JsonKey(name: 'total_tokens') - int get totalTokens => throw _privateConstructorUsedError; + /// The results of the file search. + @JsonKey(includeIfNull: false) + List? get results => + throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsFileSearch to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepCompletionUsageCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepCompletionUsageCopyWith<$Res> { - factory $RunStepCompletionUsageCopyWith(RunStepCompletionUsage value, - $Res Function(RunStepCompletionUsage) then) = - _$RunStepCompletionUsageCopyWithImpl<$Res, RunStepCompletionUsage>; +abstract class $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchCopyWith( + RunStepDetailsToolCallsFileSearch value, + $Res Function(RunStepDetailsToolCallsFileSearch) then) = + _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearch>; @useResult $Res call( - {@JsonKey(name: 'completion_tokens') int completionTokens, - @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + {@JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + @JsonKey(includeIfNull: false) + List? results}); + + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions; } /// @nodoc -class _$RunStepCompletionUsageCopyWithImpl<$Res, - $Val extends RunStepCompletionUsage> - implements $RunStepCompletionUsageCopyWith<$Res> { - _$RunStepCompletionUsageCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearch> + implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? completionTokens = null, - Object? promptTokens = null, - Object? totalTokens = null, + Object? rankingOptions = freezed, + Object? results = freezed, }) { return _then(_value.copyWith( - completionTokens: null == completionTokens - ? _value.completionTokens - : completionTokens // ignore: cast_nullable_to_non_nullable - as int, - promptTokens: null == promptTokens - ? _value.promptTokens - : promptTokens // ignore: cast_nullable_to_non_nullable - as int, - totalTokens: null == totalTokens - ? _value.totalTokens - : totalTokens // ignore: cast_nullable_to_non_nullable - as int, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, + results: freezed == results + ? 
_value.results + : results // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } + + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions { + if (_value.rankingOptions == null) { + return null; + } + + return $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>( + _value.rankingOptions!, (value) { + return _then(_value.copyWith(rankingOptions: value) as $Val); + }); + } } /// @nodoc -abstract class _$$RunStepCompletionUsageImplCopyWith<$Res> - implements $RunStepCompletionUsageCopyWith<$Res> { - factory _$$RunStepCompletionUsageImplCopyWith( - _$RunStepCompletionUsageImpl value, - $Res Function(_$RunStepCompletionUsageImpl) then) = - __$$RunStepCompletionUsageImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> + implements $RunStepDetailsToolCallsFileSearchCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchImplCopyWith( + _$RunStepDetailsToolCallsFileSearchImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) then) = + __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'completion_tokens') int completionTokens, - @JsonKey(name: 'prompt_tokens') int promptTokens, - @JsonKey(name: 'total_tokens') int totalTokens}); + {@JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + @JsonKey(includeIfNull: false) + List? results}); + + @override + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res>? + get rankingOptions; } /// @nodoc -class __$$RunStepCompletionUsageImplCopyWithImpl<$Res> - extends _$RunStepCompletionUsageCopyWithImpl<$Res, - _$RunStepCompletionUsageImpl> - implements _$$RunStepCompletionUsageImplCopyWith<$Res> { - __$$RunStepCompletionUsageImplCopyWithImpl( - _$RunStepCompletionUsageImpl _value, - $Res Function(_$RunStepCompletionUsageImpl) _then) +class __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchImpl> + implements _$$RunStepDetailsToolCallsFileSearchImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? completionTokens = null, - Object? promptTokens = null, - Object? totalTokens = null, - }) { - return _then(_$RunStepCompletionUsageImpl( - completionTokens: null == completionTokens - ? _value.completionTokens - : completionTokens // ignore: cast_nullable_to_non_nullable - as int, - promptTokens: null == promptTokens - ? _value.promptTokens - : promptTokens // ignore: cast_nullable_to_non_nullable - as int, - totalTokens: null == totalTokens - ? _value.totalTokens - : totalTokens // ignore: cast_nullable_to_non_nullable - as int, + Object? rankingOptions = freezed, + Object? results = freezed, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchImpl( + rankingOptions: freezed == rankingOptions + ? 
_value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearchRankingOptionsObject?, + results: freezed == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$RunStepCompletionUsageImpl extends _RunStepCompletionUsage { - const _$RunStepCompletionUsageImpl( - {@JsonKey(name: 'completion_tokens') required this.completionTokens, - @JsonKey(name: 'prompt_tokens') required this.promptTokens, - @JsonKey(name: 'total_tokens') required this.totalTokens}) - : super._(); +class _$RunStepDetailsToolCallsFileSearchImpl + extends _RunStepDetailsToolCallsFileSearch { + const _$RunStepDetailsToolCallsFileSearchImpl( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + this.rankingOptions, + @JsonKey(includeIfNull: false) + final List? results}) + : _results = results, + super._(); - factory _$RunStepCompletionUsageImpl.fromJson(Map json) => - _$$RunStepCompletionUsageImplFromJson(json); + factory _$RunStepDetailsToolCallsFileSearchImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchImplFromJson(json); - /// Number of completion tokens used over the course of the run step. + /// The ranking options for the file search. @override - @JsonKey(name: 'completion_tokens') - final int completionTokens; + @JsonKey(name: 'ranking_options', includeIfNull: false) + final RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions; - /// Number of prompt tokens used over the course of the run step. - @override - @JsonKey(name: 'prompt_tokens') - final int promptTokens; + /// The results of the file search. + final List? _results; - /// Total number of tokens used (prompt + completion). + /// The results of the file search. @override - @JsonKey(name: 'total_tokens') - final int totalTokens; + @JsonKey(includeIfNull: false) + List? get results { + final value = _results; + if (value == null) return null; + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } @override String toString() { - return 'RunStepCompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; + return 'RunStepDetailsToolCallsFileSearch(rankingOptions: $rankingOptions, results: $results)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepCompletionUsageImpl && - (identical(other.completionTokens, completionTokens) || - other.completionTokens == completionTokens) && - (identical(other.promptTokens, promptTokens) || - other.promptTokens == promptTokens) && - (identical(other.totalTokens, totalTokens) || - other.totalTokens == totalTokens)); + other is _$RunStepDetailsToolCallsFileSearchImpl && + (identical(other.rankingOptions, rankingOptions) || + other.rankingOptions == rankingOptions) && + const DeepCollectionEquality().equals(other._results, _results)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); + int get hashCode => Object.hash(runtimeType, rankingOptions, + const DeepCollectionEquality().hash(_results)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> - get copyWith => __$$RunStepCompletionUsageImplCopyWithImpl< - _$RunStepCompletionUsageImpl>(this, _$identity); + _$$RunStepDetailsToolCallsFileSearchImplCopyWith< + _$RunStepDetailsToolCallsFileSearchImpl> + get copyWith => __$$RunStepDetailsToolCallsFileSearchImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepCompletionUsageImplToJson( + return _$$RunStepDetailsToolCallsFileSearchImplToJson( this, ); } } -abstract class _RunStepCompletionUsage extends RunStepCompletionUsage { - const factory _RunStepCompletionUsage( - {@JsonKey(name: 'completion_tokens') required final int completionTokens, - @JsonKey(name: 'prompt_tokens') required final int promptTokens, - @JsonKey(name: 'total_tokens') - required final int totalTokens}) = _$RunStepCompletionUsageImpl; - const _RunStepCompletionUsage._() : super._(); - - factory _RunStepCompletionUsage.fromJson(Map json) = - _$RunStepCompletionUsageImpl.fromJson; +abstract class _RunStepDetailsToolCallsFileSearch + extends RunStepDetailsToolCallsFileSearch { + const factory _RunStepDetailsToolCallsFileSearch( + {@JsonKey(name: 'ranking_options', includeIfNull: false) + final RunStepDetailsToolCallsFileSearchRankingOptionsObject? + rankingOptions, + @JsonKey(includeIfNull: false) + final List? results}) = + _$RunStepDetailsToolCallsFileSearchImpl; + const _RunStepDetailsToolCallsFileSearch._() : super._(); - @override + factory _RunStepDetailsToolCallsFileSearch.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchImpl.fromJson; - /// Number of completion tokens used over the course of the run step. - @JsonKey(name: 'completion_tokens') - int get completionTokens; + /// The ranking options for the file search. @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + RunStepDetailsToolCallsFileSearchRankingOptionsObject? get rankingOptions; - /// Number of prompt tokens used over the course of the run step. - @JsonKey(name: 'prompt_tokens') - int get promptTokens; + /// The results of the file search. @override + @JsonKey(includeIfNull: false) + List? get results; - /// Total number of tokens used (prompt + completion). - @JsonKey(name: 'total_tokens') - int get totalTokens; + /// Create a copy of RunStepDetailsToolCallsFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchImplCopyWith< + _$RunStepDetailsToolCallsFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreExpirationAfter _$VectorStoreExpirationAfterFromJson( - Map json) { - return _VectorStoreExpirationAfter.fromJson(json); +RunStepDetailsToolCallsFileSearchRankingOptionsObject + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson(json); } /// @nodoc -mixin _$VectorStoreExpirationAfter { - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. - VectorStoreExpirationAfterAnchor get anchor => - throw _privateConstructorUsedError; +mixin _$RunStepDetailsToolCallsFileSearchRankingOptionsObject { + /// The ranker to use for the file search. 
If not specified will use the `auto` ranker. + FileSearchRanker get ranker => throw _privateConstructorUsedError; - /// The number of days after the anchor time that the vector store will expire. - int get days => throw _privateConstructorUsedError; + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + @JsonKey(name: 'score_threshold') + double get scoreThreshold => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsFileSearchRankingOptionsObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreExpirationAfterCopyWith + + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< + RunStepDetailsToolCallsFileSearchRankingOptionsObject> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreExpirationAfterCopyWith<$Res> { - factory $VectorStoreExpirationAfterCopyWith(VectorStoreExpirationAfter value, - $Res Function(VectorStoreExpirationAfter) then) = - _$VectorStoreExpirationAfterCopyWithImpl<$Res, - VectorStoreExpirationAfter>; +abstract class $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith< + $Res> { + factory $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith( + RunStepDetailsToolCallsFileSearchRankingOptionsObject value, + $Res Function(RunStepDetailsToolCallsFileSearchRankingOptionsObject) + then) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchRankingOptionsObject>; @useResult - $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); + $Res call( + {FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); } /// @nodoc -class _$VectorStoreExpirationAfterCopyWithImpl<$Res, - $Val extends VectorStoreExpirationAfter> - implements $VectorStoreExpirationAfterCopyWith<$Res> { - _$VectorStoreExpirationAfterCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchRankingOptionsObject> + implements + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl( + this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? anchor = null, - Object? days = null, + Object? ranker = null, + Object? scoreThreshold = null, }) { return _then(_value.copyWith( - anchor: null == anchor - ? _value.anchor - : anchor // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfterAnchor, - days: null == days - ? _value.days - : days // ignore: cast_nullable_to_non_nullable - as int, + ranker: null == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker, + scoreThreshold: null == scoreThreshold + ? 
_value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, ) as $Val); } } /// @nodoc -abstract class _$$VectorStoreExpirationAfterImplCopyWith<$Res> - implements $VectorStoreExpirationAfterCopyWith<$Res> { - factory _$$VectorStoreExpirationAfterImplCopyWith( - _$VectorStoreExpirationAfterImpl value, - $Res Function(_$VectorStoreExpirationAfterImpl) then) = - __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + $Res> + implements + $RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl value, + $Res Function( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + $Res>; @override @useResult - $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); + $Res call( + {FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') double scoreThreshold}); } /// @nodoc -class __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res> - extends _$VectorStoreExpirationAfterCopyWithImpl<$Res, - _$VectorStoreExpirationAfterImpl> - implements _$$VectorStoreExpirationAfterImplCopyWith<$Res> { - __$$VectorStoreExpirationAfterImplCopyWithImpl( - _$VectorStoreExpirationAfterImpl _value, - $Res Function(_$VectorStoreExpirationAfterImpl) _then) +class __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + $Res> + extends _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectCopyWithImpl< + $Res, _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + implements + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + $Res> { + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl) + _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? anchor = null, - Object? days = null, - }) { - return _then(_$VectorStoreExpirationAfterImpl( - anchor: null == anchor - ? _value.anchor - : anchor // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfterAnchor, - days: null == days - ? _value.days - : days // ignore: cast_nullable_to_non_nullable - as int, + Object? ranker = null, + Object? scoreThreshold = null, + }) { + return _then(_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + ranker: null == ranker + ? _value.ranker + : ranker // ignore: cast_nullable_to_non_nullable + as FileSearchRanker, + scoreThreshold: null == scoreThreshold + ? 
_value.scoreThreshold + : scoreThreshold // ignore: cast_nullable_to_non_nullable + as double, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreExpirationAfterImpl extends _VectorStoreExpirationAfter { - const _$VectorStoreExpirationAfterImpl( - {required this.anchor, required this.days}) +class _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl + extends _RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + {required this.ranker, + @JsonKey(name: 'score_threshold') required this.scoreThreshold}) : super._(); - factory _$VectorStoreExpirationAfterImpl.fromJson( + factory _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson( Map json) => - _$$VectorStoreExpirationAfterImplFromJson(json); + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( + json); - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. + /// The ranker to use for the file search. If not specified will use the `auto` ranker. @override - final VectorStoreExpirationAfterAnchor anchor; + final FileSearchRanker ranker; - /// The number of days after the anchor time that the vector store will expire. + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. @override - final int days; + @JsonKey(name: 'score_threshold') + final double scoreThreshold; @override String toString() { - return 'VectorStoreExpirationAfter(anchor: $anchor, days: $days)'; + return 'RunStepDetailsToolCallsFileSearchRankingOptionsObject(ranker: $ranker, scoreThreshold: $scoreThreshold)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreExpirationAfterImpl && - (identical(other.anchor, anchor) || other.anchor == anchor) && - (identical(other.days, days) || other.days == days)); + other + is _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl && + (identical(other.ranker, ranker) || other.ranker == ranker) && + (identical(other.scoreThreshold, scoreThreshold) || + other.scoreThreshold == scoreThreshold)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, anchor, days); + int get hashCode => Object.hash(runtimeType, ranker, scoreThreshold); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> - get copyWith => __$$VectorStoreExpirationAfterImplCopyWithImpl< - _$VectorStoreExpirationAfterImpl>(this, _$identity); + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl>( + this, _$identity); @override Map toJson() { - return _$$VectorStoreExpirationAfterImplToJson( + return _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( this, ); } } -abstract class _VectorStoreExpirationAfter extends VectorStoreExpirationAfter { - const factory _VectorStoreExpirationAfter( - {required final VectorStoreExpirationAfterAnchor anchor, - required final int days}) = _$VectorStoreExpirationAfterImpl; - const _VectorStoreExpirationAfter._() : super._(); +abstract class _RunStepDetailsToolCallsFileSearchRankingOptionsObject + extends RunStepDetailsToolCallsFileSearchRankingOptionsObject { + const factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject( + {required final FileSearchRanker ranker, + @JsonKey(name: 'score_threshold') + required final double scoreThreshold}) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl; + const _RunStepDetailsToolCallsFileSearchRankingOptionsObject._() : super._(); - factory _VectorStoreExpirationAfter.fromJson(Map json) = - _$VectorStoreExpirationAfterImpl.fromJson; + factory _RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl.fromJson; + /// The ranker to use for the file search. If not specified will use the `auto` ranker. @override + FileSearchRanker get ranker; - /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. - VectorStoreExpirationAfterAnchor get anchor; + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. @override + @JsonKey(name: 'score_threshold') + double get scoreThreshold; - /// The number of days after the anchor time that the vector store will expire. - int get days; + /// Create a copy of RunStepDetailsToolCallsFileSearchRankingOptionsObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreObject _$VectorStoreObjectFromJson(Map json) { - return _VectorStoreObject.fromJson(json); +RunStepDetailsToolCallsFileSearchResultObject + _$RunStepDetailsToolCallsFileSearchResultObjectFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchResultObject.fromJson(json); } /// @nodoc -mixin _$VectorStoreObject { - /// The identifier, which can be referenced in API endpoints. - String get id => throw _privateConstructorUsedError; - - /// The object type, which is always `vector_store`. - String get object => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the vector store was created. 
- @JsonKey(name: 'created_at') - int get createdAt => throw _privateConstructorUsedError; - - /// The name of the vector store. - String? get name => throw _privateConstructorUsedError; - - /// The total number of bytes used by the files in the vector store. - @JsonKey(name: 'usage_bytes') - int get usageBytes => throw _privateConstructorUsedError; +mixin _$RunStepDetailsToolCallsFileSearchResultObject { + /// The ID of the file that result was found in. + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; - /// The number of files in the vector store. - @JsonKey(name: 'file_counts') - VectorStoreObjectFileCounts get fileCounts => - throw _privateConstructorUsedError; + /// The name of the file that result was found in. + @JsonKey(name: 'file_name') + String get fileName => throw _privateConstructorUsedError; - /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - VectorStoreObjectStatus get status => throw _privateConstructorUsedError; + /// The score of the result. All values must be a floating point number between 0 and 1. + double get score => throw _privateConstructorUsedError; - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter => + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + @JsonKey(includeIfNull: false) + List? get content => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the vector store will expire. - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the vector store was last active. - @JsonKey(name: 'last_active_at') - int? get lastActiveAt => throw _privateConstructorUsedError; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - dynamic get metadata => throw _privateConstructorUsedError; - + /// Serializes this RunStepDetailsToolCallsFileSearchResultObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreObjectCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchResultObjectCopyWith< + RunStepDetailsToolCallsFileSearchResultObject> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreObjectCopyWith<$Res> { - factory $VectorStoreObjectCopyWith( - VectorStoreObject value, $Res Function(VectorStoreObject) then) = - _$VectorStoreObjectCopyWithImpl<$Res, VectorStoreObject>; +abstract class $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchResultObjectCopyWith( + RunStepDetailsToolCallsFileSearchResultObject value, + $Res Function(RunStepDetailsToolCallsFileSearchResultObject) then) = + _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchResultObject>; @useResult $Res call( - {String id, - String object, - @JsonKey(name: 'created_at') int createdAt, - String? name, - @JsonKey(name: 'usage_bytes') int usageBytes, - @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, - VectorStoreObjectStatus status, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'last_active_at') int? lastActiveAt, - dynamic metadata}); - - $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'file_name') String fileName, + double score, + @JsonKey(includeIfNull: false) + List? content}); } /// @nodoc -class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> - implements $VectorStoreObjectCopyWith<$Res> { - _$VectorStoreObjectCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchResultObject> + implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl( + this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, - Object? object = null, - Object? createdAt = null, - Object? name = freezed, - Object? usageBytes = null, - Object? fileCounts = null, - Object? status = null, - Object? expiresAfter = freezed, - Object? expiresAt = freezed, - Object? lastActiveAt = freezed, - Object? metadata = freezed, + Object? fileId = null, + Object? fileName = null, + Object? score = null, + Object? content = freezed, }) { return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable as String, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable + fileName: null == fileName + ? _value.fileName + : fileName // ignore: cast_nullable_to_non_nullable as String, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - usageBytes: null == usageBytes - ? 
_value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectFileCounts, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectStatus, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - lastActiveAt: freezed == lastActiveAt - ? _value.lastActiveAt - : lastActiveAt // ignore: cast_nullable_to_non_nullable - as int?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as List?, ) as $Val); } - - @override - @pragma('vm:prefer-inline') - $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts { - return $VectorStoreObjectFileCountsCopyWith<$Res>(_value.fileCounts, - (value) { - return _then(_value.copyWith(fileCounts: value) as $Val); - }); - } - - @override - @pragma('vm:prefer-inline') - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { - if (_value.expiresAfter == null) { - return null; - } - - return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, - (value) { - return _then(_value.copyWith(expiresAfter: value) as $Val); - }); - } } /// @nodoc -abstract class _$$VectorStoreObjectImplCopyWith<$Res> - implements $VectorStoreObjectCopyWith<$Res> { - factory _$$VectorStoreObjectImplCopyWith(_$VectorStoreObjectImpl value, - $Res Function(_$VectorStoreObjectImpl) then) = - __$$VectorStoreObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + $Res> + implements $RunStepDetailsToolCallsFileSearchResultObjectCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String id, - String object, - @JsonKey(name: 'created_at') int createdAt, - String? name, - @JsonKey(name: 'usage_bytes') int usageBytes, - @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, - VectorStoreObjectStatus status, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'last_active_at') int? lastActiveAt, - dynamic metadata}); - - @override - $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; - @override - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'file_name') String fileName, + double score, + @JsonKey(includeIfNull: false) + List? 
content}); } /// @nodoc -class __$$VectorStoreObjectImplCopyWithImpl<$Res> - extends _$VectorStoreObjectCopyWithImpl<$Res, _$VectorStoreObjectImpl> - implements _$$VectorStoreObjectImplCopyWith<$Res> { - __$$VectorStoreObjectImplCopyWithImpl(_$VectorStoreObjectImpl _value, - $Res Function(_$VectorStoreObjectImpl) _then) +class __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchResultObjectCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + implements + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, - Object? object = null, - Object? createdAt = null, - Object? name = freezed, - Object? usageBytes = null, - Object? fileCounts = null, - Object? status = null, - Object? expiresAfter = freezed, - Object? expiresAt = freezed, - Object? lastActiveAt = freezed, - Object? metadata = freezed, + Object? fileId = null, + Object? fileName = null, + Object? score = null, + Object? content = freezed, }) { - return _then(_$VectorStoreObjectImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable + return _then(_$RunStepDetailsToolCallsFileSearchResultObjectImpl( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable as String, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable + fileName: null == fileName + ? _value.fileName + : fileName // ignore: cast_nullable_to_non_nullable as String, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - usageBytes: null == usageBytes - ? _value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectFileCounts, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreObjectStatus, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - lastActiveAt: freezed == lastActiveAt - ? _value.lastActiveAt - : lastActiveAt // ignore: cast_nullable_to_non_nullable - as int?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + content: freezed == content + ? 
_value._content + : content // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreObjectImpl extends _VectorStoreObject { - const _$VectorStoreObjectImpl( - {required this.id, - required this.object, - @JsonKey(name: 'created_at') required this.createdAt, - required this.name, - @JsonKey(name: 'usage_bytes') required this.usageBytes, - @JsonKey(name: 'file_counts') required this.fileCounts, - required this.status, - @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, - @JsonKey(name: 'last_active_at') required this.lastActiveAt, - required this.metadata}) - : super._(); - - factory _$VectorStoreObjectImpl.fromJson(Map json) => - _$$VectorStoreObjectImplFromJson(json); - - /// The identifier, which can be referenced in API endpoints. - @override - final String id; - - /// The object type, which is always `vector_store`. - @override - final String object; - - /// The Unix timestamp (in seconds) for when the vector store was created. - @override - @JsonKey(name: 'created_at') - final int createdAt; - - /// The name of the vector store. - @override - final String? name; - - /// The total number of bytes used by the files in the vector store. - @override - @JsonKey(name: 'usage_bytes') - final int usageBytes; +class _$RunStepDetailsToolCallsFileSearchResultObjectImpl + extends _RunStepDetailsToolCallsFileSearchResultObject { + const _$RunStepDetailsToolCallsFileSearchResultObjectImpl( + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(name: 'file_name') required this.fileName, + required this.score, + @JsonKey(includeIfNull: false) + final List? content}) + : _content = content, + super._(); - /// The number of files in the vector store. - @override - @JsonKey(name: 'file_counts') - final VectorStoreObjectFileCounts fileCounts; + factory _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson(json); - /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + /// The ID of the file that result was found in. @override - final VectorStoreObjectStatus status; + @JsonKey(name: 'file_id') + final String fileId; - /// The expiration policy for a vector store. + /// The name of the file that result was found in. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter; + @JsonKey(name: 'file_name') + final String fileName; - /// The Unix timestamp (in seconds) for when the vector store will expire. + /// The score of the result. All values must be a floating point number between 0 and 1. @override - @JsonKey(name: 'expires_at', includeIfNull: false) - final int? expiresAt; + final double score; - /// The Unix timestamp (in seconds) for when the vector store was last active. - @override - @JsonKey(name: 'last_active_at') - final int? lastActiveAt; + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. + final List? _content; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. @override - final dynamic metadata; + @JsonKey(includeIfNull: false) + List? get content { + final value = _content; + if (value == null) return null; + if (_content is EqualUnmodifiableListView) return _content; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } @override String toString() { - return 'VectorStoreObject(id: $id, object: $object, createdAt: $createdAt, name: $name, usageBytes: $usageBytes, fileCounts: $fileCounts, status: $status, expiresAfter: $expiresAfter, expiresAt: $expiresAt, lastActiveAt: $lastActiveAt, metadata: $metadata)'; + return 'RunStepDetailsToolCallsFileSearchResultObject(fileId: $fileId, fileName: $fileName, score: $score, content: $content)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreObjectImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.object, object) || other.object == object) && - (identical(other.createdAt, createdAt) || - other.createdAt == createdAt) && - (identical(other.name, name) || other.name == name) && - (identical(other.usageBytes, usageBytes) || - other.usageBytes == usageBytes) && - (identical(other.fileCounts, fileCounts) || - other.fileCounts == fileCounts) && - (identical(other.status, status) || other.status == status) && - (identical(other.expiresAfter, expiresAfter) || - other.expiresAfter == expiresAfter) && - (identical(other.expiresAt, expiresAt) || - other.expiresAt == expiresAt) && - (identical(other.lastActiveAt, lastActiveAt) || - other.lastActiveAt == lastActiveAt) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + other is _$RunStepDetailsToolCallsFileSearchResultObjectImpl && + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.fileName, fileName) || + other.fileName == fileName) && + (identical(other.score, score) || other.score == score) && + const DeepCollectionEquality().equals(other._content, _content)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, - id, - object, - createdAt, - name, - usageBytes, - fileCounts, - status, - expiresAfter, - expiresAt, - lastActiveAt, - const DeepCollectionEquality().hash(metadata)); + int get hashCode => Object.hash(runtimeType, fileId, fileName, score, + const DeepCollectionEquality().hash(_content)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => - __$$VectorStoreObjectImplCopyWithImpl<_$VectorStoreObjectImpl>( - this, _$identity); + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl>( + this, _$identity); @override Map toJson() { - return _$$VectorStoreObjectImplToJson( + return _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( this, ); } } -abstract class _VectorStoreObject extends VectorStoreObject { - const factory _VectorStoreObject( - {required final String id, - required final String object, - @JsonKey(name: 'created_at') required final int createdAt, - required final String? name, - @JsonKey(name: 'usage_bytes') required final int usageBytes, - @JsonKey(name: 'file_counts') - required final VectorStoreObjectFileCounts fileCounts, - required final VectorStoreObjectStatus status, - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter, - @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, - @JsonKey(name: 'last_active_at') required final int? lastActiveAt, - required final dynamic metadata}) = _$VectorStoreObjectImpl; - const _VectorStoreObject._() : super._(); - - factory _VectorStoreObject.fromJson(Map json) = - _$VectorStoreObjectImpl.fromJson; - - @override - - /// The identifier, which can be referenced in API endpoints. - String get id; - @override - - /// The object type, which is always `vector_store`. - String get object; - @override - - /// The Unix timestamp (in seconds) for when the vector store was created. - @JsonKey(name: 'created_at') - int get createdAt; - @override - - /// The name of the vector store. - String? get name; - @override - - /// The total number of bytes used by the files in the vector store. - @JsonKey(name: 'usage_bytes') - int get usageBytes; - @override +abstract class _RunStepDetailsToolCallsFileSearchResultObject + extends RunStepDetailsToolCallsFileSearchResultObject { + const factory _RunStepDetailsToolCallsFileSearchResultObject( + {@JsonKey(name: 'file_id') required final String fileId, + @JsonKey(name: 'file_name') required final String fileName, + required final double score, + @JsonKey(includeIfNull: false) + final List? + content}) = _$RunStepDetailsToolCallsFileSearchResultObjectImpl; + const _RunStepDetailsToolCallsFileSearchResultObject._() : super._(); - /// The number of files in the vector store. - @JsonKey(name: 'file_counts') - VectorStoreObjectFileCounts get fileCounts; - @override + factory _RunStepDetailsToolCallsFileSearchResultObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchResultObjectImpl.fromJson; - /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - VectorStoreObjectStatus get status; + /// The ID of the file that result was found in. @override + @JsonKey(name: 'file_id') + String get fileId; - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter; + /// The name of the file that result was found in. 
@override + @JsonKey(name: 'file_name') + String get fileName; - /// The Unix timestamp (in seconds) for when the vector store will expire. - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt; + /// The score of the result. All values must be a floating point number between 0 and 1. @override + double get score; - /// The Unix timestamp (in seconds) for when the vector store was last active. - @JsonKey(name: 'last_active_at') - int? get lastActiveAt; + /// The content of the result that was found. The content is only included if requested via the include + /// query parameter. @override + @JsonKey(includeIfNull: false) + List? get content; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - dynamic get metadata; + /// Create a copy of RunStepDetailsToolCallsFileSearchResultObject + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => - throw _privateConstructorUsedError; + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchResultObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultObjectImpl> + get copyWith => throw _privateConstructorUsedError; } -VectorStoreObjectFileCounts _$VectorStoreObjectFileCountsFromJson( - Map json) { - return _VectorStoreObjectFileCounts.fromJson(json); +RunStepDetailsToolCallsFileSearchResultContent + _$RunStepDetailsToolCallsFileSearchResultContentFromJson( + Map json) { + return _RunStepDetailsToolCallsFileSearchResultContent.fromJson(json); } /// @nodoc -mixin _$VectorStoreObjectFileCounts { - /// The number of files that are currently being processed. - @JsonKey(name: 'in_progress') - int get inProgress => throw _privateConstructorUsedError; - - /// The number of files that have been successfully processed. - int get completed => throw _privateConstructorUsedError; - - /// The number of files that have failed to process. - int get failed => throw _privateConstructorUsedError; - - /// The number of files that were cancelled. - int get cancelled => throw _privateConstructorUsedError; +mixin _$RunStepDetailsToolCallsFileSearchResultContent { + /// The type of the content. + String get type => throw _privateConstructorUsedError; - /// The total number of files. - int get total => throw _privateConstructorUsedError; + /// The text content of the file. + @JsonKey(includeIfNull: false) + String? get text => throw _privateConstructorUsedError; + /// Serializes this RunStepDetailsToolCallsFileSearchResultContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreObjectFileCountsCopyWith + + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFileSearchResultContentCopyWith< + RunStepDetailsToolCallsFileSearchResultContent> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreObjectFileCountsCopyWith<$Res> { - factory $VectorStoreObjectFileCountsCopyWith( - VectorStoreObjectFileCounts value, - $Res Function(VectorStoreObjectFileCounts) then) = - _$VectorStoreObjectFileCountsCopyWithImpl<$Res, - VectorStoreObjectFileCounts>; +abstract class $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + factory $RunStepDetailsToolCallsFileSearchResultContentCopyWith( + RunStepDetailsToolCallsFileSearchResultContent value, + $Res Function(RunStepDetailsToolCallsFileSearchResultContent) then) = + _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + RunStepDetailsToolCallsFileSearchResultContent>; @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); + $Res call({String type, @JsonKey(includeIfNull: false) String? text}); } /// @nodoc -class _$VectorStoreObjectFileCountsCopyWithImpl<$Res, - $Val extends VectorStoreObjectFileCounts> - implements $VectorStoreObjectFileCountsCopyWith<$Res> { - _$VectorStoreObjectFileCountsCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFileSearchResultContent> + implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl( + this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, + Object? type = null, + Object? text = freezed, }) { return _then(_value.copyWith( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? 
_value.text + : text // ignore: cast_nullable_to_non_nullable + as String?, ) as $Val); } } /// @nodoc -abstract class _$$VectorStoreObjectFileCountsImplCopyWith<$Res> - implements $VectorStoreObjectFileCountsCopyWith<$Res> { - factory _$$VectorStoreObjectFileCountsImplCopyWith( - _$VectorStoreObjectFileCountsImpl value, - $Res Function(_$VectorStoreObjectFileCountsImpl) then) = - __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + $Res> + implements $RunStepDetailsToolCallsFileSearchResultContentCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith( + _$RunStepDetailsToolCallsFileSearchResultContentImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) + then) = + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); + $Res call({String type, @JsonKey(includeIfNull: false) String? text}); } /// @nodoc -class __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res> - extends _$VectorStoreObjectFileCountsCopyWithImpl<$Res, - _$VectorStoreObjectFileCountsImpl> - implements _$$VectorStoreObjectFileCountsImplCopyWith<$Res> { - __$$VectorStoreObjectFileCountsImplCopyWithImpl( - _$VectorStoreObjectFileCountsImpl _value, - $Res Function(_$VectorStoreObjectFileCountsImpl) _then) +class __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFileSearchResultContentCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + implements + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchResultContentImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchResultContentImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, + Object? type = null, + Object? text = freezed, }) { - return _then(_$VectorStoreObjectFileCountsImpl( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$RunStepDetailsToolCallsFileSearchResultContentImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: freezed == text + ? 
_value.text
+          : text // ignore: cast_nullable_to_non_nullable
+              as String?,
     ));
   }
 }
 
 /// @nodoc
 @JsonSerializable()
-class _$VectorStoreObjectFileCountsImpl extends _VectorStoreObjectFileCounts {
-  const _$VectorStoreObjectFileCountsImpl(
-      {@JsonKey(name: 'in_progress') required this.inProgress,
-      required this.completed,
-      required this.failed,
-      required this.cancelled,
-      required this.total})
+class _$RunStepDetailsToolCallsFileSearchResultContentImpl
+    extends _RunStepDetailsToolCallsFileSearchResultContent {
+  const _$RunStepDetailsToolCallsFileSearchResultContentImpl(
+      {this.type = 'text', @JsonKey(includeIfNull: false) this.text})
       : super._();
 
-  factory _$VectorStoreObjectFileCountsImpl.fromJson(
+  factory _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson(
           Map<String, dynamic> json) =>
-      _$$VectorStoreObjectFileCountsImplFromJson(json);
+      _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson(json);
 
-  /// The number of files that are currently being processed.
-  @override
-  @JsonKey(name: 'in_progress')
-  final int inProgress;
-
-  /// The number of files that have been successfully processed.
-  @override
-  final int completed;
-
-  /// The number of files that have failed to process.
-  @override
-  final int failed;
-
-  /// The number of files that were cancelled.
+  /// The type of the content.
   @override
-  final int cancelled;
+  @JsonKey()
+  final String type;
 
-  /// The total number of files.
+  /// The text content of the file.
   @override
-  final int total;
+  @JsonKey(includeIfNull: false)
+  final String? text;
 
   @override
   String toString() {
-    return 'VectorStoreObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)';
+    return 'RunStepDetailsToolCallsFileSearchResultContent(type: $type, text: $text)';
   }
 
   @override
   bool operator ==(Object other) {
     return identical(this, other) ||
         (other.runtimeType == runtimeType &&
-            other is _$VectorStoreObjectFileCountsImpl &&
-            (identical(other.inProgress, inProgress) ||
-                other.inProgress == inProgress) &&
-            (identical(other.completed, completed) ||
-                other.completed == completed) &&
-            (identical(other.failed, failed) || other.failed == failed) &&
-            (identical(other.cancelled, cancelled) ||
-                other.cancelled == cancelled) &&
-            (identical(other.total, total) || other.total == total));
+            other is _$RunStepDetailsToolCallsFileSearchResultContentImpl &&
+            (identical(other.type, type) || other.type == type) &&
+            (identical(other.text, text) || other.text == text));
   }
 
-  @JsonKey(ignore: true)
+  @JsonKey(includeFromJson: false, includeToJson: false)
   @override
-  int get hashCode =>
-      Object.hash(runtimeType, inProgress, completed, failed, cancelled, total);
+  int get hashCode => Object.hash(runtimeType, type, text);
 
-  @JsonKey(ignore: true)
+  /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent
+  /// with the given fields replaced by the non-null parameter values.
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> - get copyWith => __$$VectorStoreObjectFileCountsImplCopyWithImpl< - _$VectorStoreObjectFileCountsImpl>(this, _$identity); + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultContentImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchResultContentImpl>( + this, _$identity); @override Map toJson() { - return _$$VectorStoreObjectFileCountsImplToJson( + return _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( this, ); } } -abstract class _VectorStoreObjectFileCounts - extends VectorStoreObjectFileCounts { - const factory _VectorStoreObjectFileCounts( - {@JsonKey(name: 'in_progress') required final int inProgress, - required final int completed, - required final int failed, - required final int cancelled, - required final int total}) = _$VectorStoreObjectFileCountsImpl; - const _VectorStoreObjectFileCounts._() : super._(); +abstract class _RunStepDetailsToolCallsFileSearchResultContent + extends RunStepDetailsToolCallsFileSearchResultContent { + const factory _RunStepDetailsToolCallsFileSearchResultContent( + {final String type, + @JsonKey(includeIfNull: false) final String? text}) = + _$RunStepDetailsToolCallsFileSearchResultContentImpl; + const _RunStepDetailsToolCallsFileSearchResultContent._() : super._(); - factory _VectorStoreObjectFileCounts.fromJson(Map json) = - _$VectorStoreObjectFileCountsImpl.fromJson; + factory _RunStepDetailsToolCallsFileSearchResultContent.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchResultContentImpl.fromJson; + /// The type of the content. @override + String get type; - /// The number of files that are currently being processed. - @JsonKey(name: 'in_progress') - int get inProgress; + /// The text content of the file. @override + @JsonKey(includeIfNull: false) + String? get text; - /// The number of files that have been successfully processed. - int get completed; + /// Create a copy of RunStepDetailsToolCallsFileSearchResultContent + /// with the given fields replaced by the non-null parameter values. @override - - /// The number of files that have failed to process. - int get failed; - @override - - /// The number of files that were cancelled. - int get cancelled; - @override - - /// The total number of files. - int get total; - @override - @JsonKey(ignore: true) - _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchResultContentImplCopyWith< + _$RunStepDetailsToolCallsFileSearchResultContentImpl> get copyWith => throw _privateConstructorUsedError; } -CreateVectorStoreRequest _$CreateVectorStoreRequestFromJson( +RunStepCompletionUsage _$RunStepCompletionUsageFromJson( Map json) { - return _CreateVectorStoreRequest.fromJson(json); + return _RunStepCompletionUsage.fromJson(json); } /// @nodoc -mixin _$CreateVectorStoreRequest { - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_ids', includeIfNull: false) - List? get fileIds => throw _privateConstructorUsedError; - - /// The name of the vector store. 
- String get name => throw _privateConstructorUsedError; +mixin _$RunStepCompletionUsage { + /// Number of completion tokens used over the course of the run step. + @JsonKey(name: 'completion_tokens') + int get completionTokens => throw _privateConstructorUsedError; - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter => - throw _privateConstructorUsedError; + /// Number of prompt tokens used over the course of the run step. + @JsonKey(name: 'prompt_tokens') + int get promptTokens => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata => throw _privateConstructorUsedError; + /// Total number of tokens used (prompt + completion). + @JsonKey(name: 'total_tokens') + int get totalTokens => throw _privateConstructorUsedError; + /// Serializes this RunStepCompletionUsage to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateVectorStoreRequestCopyWith get copyWith => + + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepCompletionUsageCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateVectorStoreRequestCopyWith<$Res> { - factory $CreateVectorStoreRequestCopyWith(CreateVectorStoreRequest value, - $Res Function(CreateVectorStoreRequest) then) = - _$CreateVectorStoreRequestCopyWithImpl<$Res, CreateVectorStoreRequest>; +abstract class $RunStepCompletionUsageCopyWith<$Res> { + factory $RunStepCompletionUsageCopyWith(RunStepCompletionUsage value, + $Res Function(RunStepCompletionUsage) then) = + _$RunStepCompletionUsageCopyWithImpl<$Res, RunStepCompletionUsage>; @useResult $Res call( - {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - String name, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) dynamic metadata}); - - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {@JsonKey(name: 'completion_tokens') int completionTokens, + @JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); } /// @nodoc -class _$CreateVectorStoreRequestCopyWithImpl<$Res, - $Val extends CreateVectorStoreRequest> - implements $CreateVectorStoreRequestCopyWith<$Res> { - _$CreateVectorStoreRequestCopyWithImpl(this._value, this._then); +class _$RunStepCompletionUsageCopyWithImpl<$Res, + $Val extends RunStepCompletionUsage> + implements $RunStepCompletionUsageCopyWith<$Res> { + _$RunStepCompletionUsageCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileIds = freezed, - Object? name = null, - Object? expiresAfter = freezed, - Object? metadata = freezed, + Object? completionTokens = null, + Object? promptTokens = null, + Object? 
totalTokens = null, }) { return _then(_value.copyWith( - fileIds: freezed == fileIds - ? _value.fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + completionTokens: null == completionTokens + ? _value.completionTokens + : completionTokens // ignore: cast_nullable_to_non_nullable + as int, + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? _value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } - - @override - @pragma('vm:prefer-inline') - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { - if (_value.expiresAfter == null) { - return null; - } - - return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, - (value) { - return _then(_value.copyWith(expiresAfter: value) as $Val); - }); - } } /// @nodoc -abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> - implements $CreateVectorStoreRequestCopyWith<$Res> { - factory _$$CreateVectorStoreRequestImplCopyWith( - _$CreateVectorStoreRequestImpl value, - $Res Function(_$CreateVectorStoreRequestImpl) then) = - __$$CreateVectorStoreRequestImplCopyWithImpl<$Res>; +abstract class _$$RunStepCompletionUsageImplCopyWith<$Res> + implements $RunStepCompletionUsageCopyWith<$Res> { + factory _$$RunStepCompletionUsageImplCopyWith( + _$RunStepCompletionUsageImpl value, + $Res Function(_$RunStepCompletionUsageImpl) then) = + __$$RunStepCompletionUsageImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, - String name, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + {@JsonKey(name: 'completion_tokens') int completionTokens, + @JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); } /// @nodoc -class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> - extends _$CreateVectorStoreRequestCopyWithImpl<$Res, - _$CreateVectorStoreRequestImpl> - implements _$$CreateVectorStoreRequestImplCopyWith<$Res> { - __$$CreateVectorStoreRequestImplCopyWithImpl( - _$CreateVectorStoreRequestImpl _value, - $Res Function(_$CreateVectorStoreRequestImpl) _then) +class __$$RunStepCompletionUsageImplCopyWithImpl<$Res> + extends _$RunStepCompletionUsageCopyWithImpl<$Res, + _$RunStepCompletionUsageImpl> + implements _$$RunStepCompletionUsageImplCopyWith<$Res> { + __$$RunStepCompletionUsageImplCopyWithImpl( + _$RunStepCompletionUsageImpl _value, + $Res Function(_$RunStepCompletionUsageImpl) _then) : super(_value, _then); + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileIds = freezed, - Object? name = null, - Object? expiresAfter = freezed, - Object? metadata = freezed, + Object? completionTokens = null, + Object? promptTokens = null, + Object? 
totalTokens = null, }) { - return _then(_$CreateVectorStoreRequestImpl( - fileIds: freezed == fileIds - ? _value._fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + return _then(_$RunStepCompletionUsageImpl( + completionTokens: null == completionTokens + ? _value.completionTokens + : completionTokens // ignore: cast_nullable_to_non_nullable + as int, + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? _value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { - const _$CreateVectorStoreRequestImpl( - {@JsonKey(name: 'file_ids', includeIfNull: false) - final List? fileIds, - required this.name, - @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, - @JsonKey(includeIfNull: false) this.metadata}) - : _fileIds = fileIds, - super._(); - - factory _$CreateVectorStoreRequestImpl.fromJson(Map json) => - _$$CreateVectorStoreRequestImplFromJson(json); - - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - final List? _fileIds; +class _$RunStepCompletionUsageImpl extends _RunStepCompletionUsage { + const _$RunStepCompletionUsageImpl( + {@JsonKey(name: 'completion_tokens') required this.completionTokens, + @JsonKey(name: 'prompt_tokens') required this.promptTokens, + @JsonKey(name: 'total_tokens') required this.totalTokens}) + : super._(); - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @override - @JsonKey(name: 'file_ids', includeIfNull: false) - List? get fileIds { - final value = _fileIds; - if (value == null) return null; - if (_fileIds is EqualUnmodifiableListView) return _fileIds; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + factory _$RunStepCompletionUsageImpl.fromJson(Map json) => + _$$RunStepCompletionUsageImplFromJson(json); - /// The name of the vector store. + /// Number of completion tokens used over the course of the run step. @override - final String name; + @JsonKey(name: 'completion_tokens') + final int completionTokens; - /// The expiration policy for a vector store. + /// Number of prompt tokens used over the course of the run step. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter; + @JsonKey(name: 'prompt_tokens') + final int promptTokens; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Total number of tokens used (prompt + completion). 
@override - @JsonKey(includeIfNull: false) - final dynamic metadata; + @JsonKey(name: 'total_tokens') + final int totalTokens; @override String toString() { - return 'CreateVectorStoreRequest(fileIds: $fileIds, name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'RunStepCompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateVectorStoreRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds) && - (identical(other.name, name) || other.name == name) && - (identical(other.expiresAfter, expiresAfter) || - other.expiresAfter == expiresAfter) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + other is _$RunStepCompletionUsageImpl && + (identical(other.completionTokens, completionTokens) || + other.completionTokens == completionTokens) && + (identical(other.promptTokens, promptTokens) || + other.promptTokens == promptTokens) && + (identical(other.totalTokens, totalTokens) || + other.totalTokens == totalTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, - const DeepCollectionEquality().hash(_fileIds), - name, - expiresAfter, - const DeepCollectionEquality().hash(metadata)); + int get hashCode => + Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); - @JsonKey(ignore: true) + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> - get copyWith => __$$CreateVectorStoreRequestImplCopyWithImpl< - _$CreateVectorStoreRequestImpl>(this, _$identity); + _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> + get copyWith => __$$RunStepCompletionUsageImplCopyWithImpl< + _$RunStepCompletionUsageImpl>(this, _$identity); @override Map toJson() { - return _$$CreateVectorStoreRequestImplToJson( + return _$$RunStepCompletionUsageImplToJson( this, ); } } -abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { - const factory _CreateVectorStoreRequest( - {@JsonKey(name: 'file_ids', includeIfNull: false) - final List? fileIds, - required final String name, - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) final dynamic metadata}) = - _$CreateVectorStoreRequestImpl; - const _CreateVectorStoreRequest._() : super._(); - - factory _CreateVectorStoreRequest.fromJson(Map json) = - _$CreateVectorStoreRequestImpl.fromJson; +abstract class _RunStepCompletionUsage extends RunStepCompletionUsage { + const factory _RunStepCompletionUsage( + {@JsonKey(name: 'completion_tokens') required final int completionTokens, + @JsonKey(name: 'prompt_tokens') required final int promptTokens, + @JsonKey(name: 'total_tokens') + required final int totalTokens}) = _$RunStepCompletionUsageImpl; + const _RunStepCompletionUsage._() : super._(); - @override + factory _RunStepCompletionUsage.fromJson(Map json) = + _$RunStepCompletionUsageImpl.fromJson; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. 
- @JsonKey(name: 'file_ids', includeIfNull: false) - List? get fileIds; + /// Number of completion tokens used over the course of the run step. @override + @JsonKey(name: 'completion_tokens') + int get completionTokens; - /// The name of the vector store. - String get name; + /// Number of prompt tokens used over the course of the run step. @override + @JsonKey(name: 'prompt_tokens') + int get promptTokens; - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter; + /// Total number of tokens used (prompt + completion). @override + @JsonKey(name: 'total_tokens') + int get totalTokens; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata; + /// Create a copy of RunStepCompletionUsage + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepCompletionUsageImplCopyWith<_$RunStepCompletionUsageImpl> get copyWith => throw _privateConstructorUsedError; } -UpdateVectorStoreRequest _$UpdateVectorStoreRequestFromJson( +VectorStoreExpirationAfter _$VectorStoreExpirationAfterFromJson( Map json) { - return _UpdateVectorStoreRequest.fromJson(json); + return _VectorStoreExpirationAfter.fromJson(json); } /// @nodoc -mixin _$UpdateVectorStoreRequest { - /// The name of the vector store. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; - - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter => +mixin _$VectorStoreExpirationAfter { + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. + VectorStoreExpirationAfterAnchor get anchor => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata => throw _privateConstructorUsedError; + /// The number of days after the anchor time that the vector store will expire. + int get days => throw _privateConstructorUsedError; + /// Serializes this VectorStoreExpirationAfter to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $UpdateVectorStoreRequestCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreExpirationAfterCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $UpdateVectorStoreRequestCopyWith<$Res> { - factory $UpdateVectorStoreRequestCopyWith(UpdateVectorStoreRequest value, - $Res Function(UpdateVectorStoreRequest) then) = - _$UpdateVectorStoreRequestCopyWithImpl<$Res, UpdateVectorStoreRequest>; +abstract class $VectorStoreExpirationAfterCopyWith<$Res> { + factory $VectorStoreExpirationAfterCopyWith(VectorStoreExpirationAfter value, + $Res Function(VectorStoreExpirationAfter) then) = + _$VectorStoreExpirationAfterCopyWithImpl<$Res, + VectorStoreExpirationAfter>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) dynamic metadata}); - - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); } /// @nodoc -class _$UpdateVectorStoreRequestCopyWithImpl<$Res, - $Val extends UpdateVectorStoreRequest> - implements $UpdateVectorStoreRequestCopyWith<$Res> { - _$UpdateVectorStoreRequestCopyWithImpl(this._value, this._then); +class _$VectorStoreExpirationAfterCopyWithImpl<$Res, + $Val extends VectorStoreExpirationAfter> + implements $VectorStoreExpirationAfterCopyWith<$Res> { + _$VectorStoreExpirationAfterCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? expiresAfter = freezed, - Object? metadata = freezed, + Object? anchor = null, + Object? days = null, }) { return _then(_value.copyWith( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + anchor: null == anchor + ? _value.anchor + : anchor // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfterAnchor, + days: null == days + ? _value.days + : days // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } - - @override - @pragma('vm:prefer-inline') - $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter { - if (_value.expiresAfter == null) { - return null; - } - - return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, - (value) { - return _then(_value.copyWith(expiresAfter: value) as $Val); - }); - } } /// @nodoc -abstract class _$$UpdateVectorStoreRequestImplCopyWith<$Res> - implements $UpdateVectorStoreRequestCopyWith<$Res> { - factory _$$UpdateVectorStoreRequestImplCopyWith( - _$UpdateVectorStoreRequestImpl value, - $Res Function(_$UpdateVectorStoreRequestImpl) then) = - __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreExpirationAfterImplCopyWith<$Res> + implements $VectorStoreExpirationAfterCopyWith<$Res> { + factory _$$VectorStoreExpirationAfterImplCopyWith( + _$VectorStoreExpirationAfterImpl value, + $Res Function(_$VectorStoreExpirationAfterImpl) then) = + __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + $Res call({VectorStoreExpirationAfterAnchor anchor, int days}); } /// @nodoc -class __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res> - extends _$UpdateVectorStoreRequestCopyWithImpl<$Res, - _$UpdateVectorStoreRequestImpl> - implements _$$UpdateVectorStoreRequestImplCopyWith<$Res> { - __$$UpdateVectorStoreRequestImplCopyWithImpl( - _$UpdateVectorStoreRequestImpl _value, - $Res Function(_$UpdateVectorStoreRequestImpl) _then) +class __$$VectorStoreExpirationAfterImplCopyWithImpl<$Res> + extends _$VectorStoreExpirationAfterCopyWithImpl<$Res, + _$VectorStoreExpirationAfterImpl> + implements _$$VectorStoreExpirationAfterImplCopyWith<$Res> { + __$$VectorStoreExpirationAfterImplCopyWithImpl( + _$VectorStoreExpirationAfterImpl _value, + $Res Function(_$VectorStoreExpirationAfterImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = freezed, - Object? expiresAfter = freezed, - Object? metadata = freezed, + Object? anchor = null, + Object? days = null, }) { - return _then(_$UpdateVectorStoreRequestImpl( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - expiresAfter: freezed == expiresAfter - ? _value.expiresAfter - : expiresAfter // ignore: cast_nullable_to_non_nullable - as VectorStoreExpirationAfter?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, + return _then(_$VectorStoreExpirationAfterImpl( + anchor: null == anchor + ? _value.anchor + : anchor // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfterAnchor, + days: null == days + ? 
_value.days + : days // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { - const _$UpdateVectorStoreRequestImpl( - {@JsonKey(includeIfNull: false) this.name, - @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, - @JsonKey(includeIfNull: false) this.metadata}) +class _$VectorStoreExpirationAfterImpl extends _VectorStoreExpirationAfter { + const _$VectorStoreExpirationAfterImpl( + {required this.anchor, required this.days}) : super._(); - factory _$UpdateVectorStoreRequestImpl.fromJson(Map json) => - _$$UpdateVectorStoreRequestImplFromJson(json); - - /// The name of the vector store. - @override - @JsonKey(includeIfNull: false) - final String? name; + factory _$VectorStoreExpirationAfterImpl.fromJson( + Map json) => + _$$VectorStoreExpirationAfterImplFromJson(json); - /// The expiration policy for a vector store. + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. @override - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter; + final VectorStoreExpirationAfterAnchor anchor; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// The number of days after the anchor time that the vector store will expire. @override - @JsonKey(includeIfNull: false) - final dynamic metadata; + final int days; @override String toString() { - return 'UpdateVectorStoreRequest(name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; + return 'VectorStoreExpirationAfter(anchor: $anchor, days: $days)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$UpdateVectorStoreRequestImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.expiresAfter, expiresAfter) || - other.expiresAfter == expiresAfter) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + other is _$VectorStoreExpirationAfterImpl && + (identical(other.anchor, anchor) || other.anchor == anchor) && + (identical(other.days, days) || other.days == days)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, name, expiresAfter, - const DeepCollectionEquality().hash(metadata)); + int get hashCode => Object.hash(runtimeType, anchor, days); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> - get copyWith => __$$UpdateVectorStoreRequestImplCopyWithImpl< - _$UpdateVectorStoreRequestImpl>(this, _$identity); + _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> + get copyWith => __$$VectorStoreExpirationAfterImplCopyWithImpl< + _$VectorStoreExpirationAfterImpl>(this, _$identity); @override Map toJson() { - return _$$UpdateVectorStoreRequestImplToJson( + return _$$VectorStoreExpirationAfterImplToJson( this, ); } } -abstract class _UpdateVectorStoreRequest extends UpdateVectorStoreRequest { - const factory _UpdateVectorStoreRequest( - {@JsonKey(includeIfNull: false) final String? name, - @JsonKey(name: 'expires_after', includeIfNull: false) - final VectorStoreExpirationAfter? expiresAfter, - @JsonKey(includeIfNull: false) final dynamic metadata}) = - _$UpdateVectorStoreRequestImpl; - const _UpdateVectorStoreRequest._() : super._(); - - factory _UpdateVectorStoreRequest.fromJson(Map json) = - _$UpdateVectorStoreRequestImpl.fromJson; +abstract class _VectorStoreExpirationAfter extends VectorStoreExpirationAfter { + const factory _VectorStoreExpirationAfter( + {required final VectorStoreExpirationAfterAnchor anchor, + required final int days}) = _$VectorStoreExpirationAfterImpl; + const _VectorStoreExpirationAfter._() : super._(); - @override + factory _VectorStoreExpirationAfter.fromJson(Map json) = + _$VectorStoreExpirationAfterImpl.fromJson; - /// The name of the vector store. - @JsonKey(includeIfNull: false) - String? get name; + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. @override + VectorStoreExpirationAfterAnchor get anchor; - /// The expiration policy for a vector store. - @JsonKey(name: 'expires_after', includeIfNull: false) - VectorStoreExpirationAfter? get expiresAfter; + /// The number of days after the anchor time that the vector store will expire. @override + int get days; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata; + /// Create a copy of VectorStoreExpirationAfter + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreExpirationAfterImplCopyWith<_$VectorStoreExpirationAfterImpl> get copyWith => throw _privateConstructorUsedError; } -ListVectorStoresResponse _$ListVectorStoresResponseFromJson( - Map json) { - return _ListVectorStoresResponse.fromJson(json); +VectorStoreObject _$VectorStoreObjectFromJson(Map json) { + return _VectorStoreObject.fromJson(json); } /// @nodoc -mixin _$ListVectorStoresResponse { - /// The object type, which is always `list`. +mixin _$VectorStoreObject { + /// The identifier, which can be referenced in API endpoints. + String get id => throw _privateConstructorUsedError; + + /// The object type, which is always `vector_store`. String get object => throw _privateConstructorUsedError; - /// A list of assistant files. 
- List get data => throw _privateConstructorUsedError; + /// The Unix timestamp (in seconds) for when the vector store was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; - /// The ID of the first assistant file in the list. - @JsonKey(name: 'first_id') - String? get firstId => throw _privateConstructorUsedError; + /// The name of the vector store. + String? get name => throw _privateConstructorUsedError; - /// The ID of the last assistant file in the list. - @JsonKey(name: 'last_id') - String? get lastId => throw _privateConstructorUsedError; + /// The total number of bytes used by the files in the vector store. + @JsonKey(name: 'usage_bytes') + int get usageBytes => throw _privateConstructorUsedError; - /// Whether there are more assistant files available. - @JsonKey(name: 'has_more') - bool get hasMore => throw _privateConstructorUsedError; + /// The number of files in the vector store. + @JsonKey(name: 'file_counts') + VectorStoreObjectFileCounts get fileCounts => + throw _privateConstructorUsedError; + + /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + VectorStoreObjectStatus get status => throw _privateConstructorUsedError; + + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter => + throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the vector store will expire. + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the vector store was last active. + @JsonKey(name: 'last_active_at') + int? get lastActiveAt => throw _privateConstructorUsedError; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this VectorStoreObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ListVectorStoresResponseCopyWith get copyWith => + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreObjectCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ListVectorStoresResponseCopyWith<$Res> { - factory $ListVectorStoresResponseCopyWith(ListVectorStoresResponse value, - $Res Function(ListVectorStoresResponse) then) = - _$ListVectorStoresResponseCopyWithImpl<$Res, ListVectorStoresResponse>; +abstract class $VectorStoreObjectCopyWith<$Res> { + factory $VectorStoreObjectCopyWith( + VectorStoreObject value, $Res Function(VectorStoreObject) then) = + _$VectorStoreObjectCopyWithImpl<$Res, VectorStoreObject>; @useResult $Res call( - {String object, - List data, - @JsonKey(name: 'first_id') String? firstId, - @JsonKey(name: 'last_id') String? lastId, - @JsonKey(name: 'has_more') bool hasMore}); + {String id, + String object, + @JsonKey(name: 'created_at') int createdAt, + String? 
name, + @JsonKey(name: 'usage_bytes') int usageBytes, + @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, + VectorStoreObjectStatus status, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'last_active_at') int? lastActiveAt, + dynamic metadata}); + + $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; } /// @nodoc -class _$ListVectorStoresResponseCopyWithImpl<$Res, - $Val extends ListVectorStoresResponse> - implements $ListVectorStoresResponseCopyWith<$Res> { - _$ListVectorStoresResponseCopyWithImpl(this._value, this._then); +class _$VectorStoreObjectCopyWithImpl<$Res, $Val extends VectorStoreObject> + implements $VectorStoreObjectCopyWith<$Res> { + _$VectorStoreObjectCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? id = null, Object? object = null, - Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, - Object? hasMore = null, + Object? createdAt = null, + Object? name = freezed, + Object? usageBytes = null, + Object? fileCounts = null, + Object? status = null, + Object? expiresAfter = freezed, + Object? expiresAt = freezed, + Object? lastActiveAt = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable as String?, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectFileCounts, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectStatus, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + lastActiveAt: freezed == lastActiveAt + ? _value.lastActiveAt + : lastActiveAt // ignore: cast_nullable_to_non_nullable + as int?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, ) as $Val); } + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts { + return $VectorStoreObjectFileCountsCopyWith<$Res>(_value.fileCounts, + (value) { + return _then(_value.copyWith(fileCounts: value) as $Val); + }); + } + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { + if (_value.expiresAfter == null) { + return null; + } + + return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, + (value) { + return _then(_value.copyWith(expiresAfter: value) as $Val); + }); + } } /// @nodoc -abstract class _$$ListVectorStoresResponseImplCopyWith<$Res> - implements $ListVectorStoresResponseCopyWith<$Res> { - factory _$$ListVectorStoresResponseImplCopyWith( - _$ListVectorStoresResponseImpl value, - $Res Function(_$ListVectorStoresResponseImpl) then) = - __$$ListVectorStoresResponseImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreObjectImplCopyWith<$Res> + implements $VectorStoreObjectCopyWith<$Res> { + factory _$$VectorStoreObjectImplCopyWith(_$VectorStoreObjectImpl value, + $Res Function(_$VectorStoreObjectImpl) then) = + __$$VectorStoreObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String object, - List data, - @JsonKey(name: 'first_id') String? firstId, - @JsonKey(name: 'last_id') String? lastId, - @JsonKey(name: 'has_more') bool hasMore}); + {String id, + String object, + @JsonKey(name: 'created_at') int createdAt, + String? name, + @JsonKey(name: 'usage_bytes') int usageBytes, + @JsonKey(name: 'file_counts') VectorStoreObjectFileCounts fileCounts, + VectorStoreObjectStatus status, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'last_active_at') int? lastActiveAt, + dynamic metadata}); + + @override + $VectorStoreObjectFileCountsCopyWith<$Res> get fileCounts; + @override + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; } /// @nodoc -class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> - extends _$ListVectorStoresResponseCopyWithImpl<$Res, - _$ListVectorStoresResponseImpl> - implements _$$ListVectorStoresResponseImplCopyWith<$Res> { - __$$ListVectorStoresResponseImplCopyWithImpl( - _$ListVectorStoresResponseImpl _value, - $Res Function(_$ListVectorStoresResponseImpl) _then) +class __$$VectorStoreObjectImplCopyWithImpl<$Res> + extends _$VectorStoreObjectCopyWithImpl<$Res, _$VectorStoreObjectImpl> + implements _$$VectorStoreObjectImplCopyWith<$Res> { + __$$VectorStoreObjectImplCopyWithImpl(_$VectorStoreObjectImpl _value, + $Res Function(_$VectorStoreObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? id = null, Object? object = null, - Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, - Object? hasMore = null, + Object? createdAt = null, + Object? name = freezed, + Object? usageBytes = null, + Object? fileCounts = null, + Object? status = null, + Object? expiresAfter = freezed, + Object? expiresAt = freezed, + Object? lastActiveAt = freezed, + Object? metadata = freezed, }) { - return _then(_$ListVectorStoresResponseImpl( + return _then(_$VectorStoreObjectImpl( + id: null == id + ? 
_value.id + : id // ignore: cast_nullable_to_non_nullable + as String, object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, - data: null == data - ? _value._data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable as String?, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectFileCounts, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreObjectStatus, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + lastActiveAt: freezed == lastActiveAt + ? _value.lastActiveAt + : lastActiveAt // ignore: cast_nullable_to_non_nullable + as int?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { - const _$ListVectorStoresResponseImpl( - {required this.object, - required final List data, - @JsonKey(name: 'first_id') required this.firstId, - @JsonKey(name: 'last_id') required this.lastId, - @JsonKey(name: 'has_more') required this.hasMore}) - : _data = data, - super._(); +class _$VectorStoreObjectImpl extends _VectorStoreObject { + const _$VectorStoreObjectImpl( + {required this.id, + required this.object, + @JsonKey(name: 'created_at') required this.createdAt, + required this.name, + @JsonKey(name: 'usage_bytes') required this.usageBytes, + @JsonKey(name: 'file_counts') required this.fileCounts, + required this.status, + @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'last_active_at') required this.lastActiveAt, + required this.metadata}) + : super._(); - factory _$ListVectorStoresResponseImpl.fromJson(Map json) => - _$$ListVectorStoresResponseImplFromJson(json); + factory _$VectorStoreObjectImpl.fromJson(Map json) => + _$$VectorStoreObjectImplFromJson(json); - /// The object type, which is always `list`. + /// The identifier, which can be referenced in API endpoints. + @override + final String id; + + /// The object type, which is always `vector_store`. @override final String object; - /// A list of assistant files. - final List _data; + /// The Unix timestamp (in seconds) for when the vector store was created. + @override + @JsonKey(name: 'created_at') + final int createdAt; - /// A list of assistant files. + /// The name of the vector store. 
@override - List get data { - if (_data is EqualUnmodifiableListView) return _data; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_data); - } + final String? name; - /// The ID of the first assistant file in the list. + /// The total number of bytes used by the files in the vector store. @override - @JsonKey(name: 'first_id') - final String? firstId; + @JsonKey(name: 'usage_bytes') + final int usageBytes; - /// The ID of the last assistant file in the list. + /// The number of files in the vector store. @override - @JsonKey(name: 'last_id') - final String? lastId; + @JsonKey(name: 'file_counts') + final VectorStoreObjectFileCounts fileCounts; - /// Whether there are more assistant files available. + /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. @override - @JsonKey(name: 'has_more') - final bool hasMore; + final VectorStoreObjectStatus status; + + /// The expiration policy for a vector store. + @override + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter; + + /// The Unix timestamp (in seconds) for when the vector store will expire. + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + final int? expiresAt; + + /// The Unix timestamp (in seconds) for when the vector store was last active. + @override + @JsonKey(name: 'last_active_at') + final int? lastActiveAt; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override + final dynamic metadata; @override String toString() { - return 'ListVectorStoresResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; + return 'VectorStoreObject(id: $id, object: $object, createdAt: $createdAt, name: $name, usageBytes: $usageBytes, fileCounts: $fileCounts, status: $status, expiresAfter: $expiresAfter, expiresAt: $expiresAt, lastActiveAt: $lastActiveAt, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ListVectorStoresResponseImpl && + other is _$VectorStoreObjectImpl && + (identical(other.id, id) || other.id == id) && (identical(other.object, object) || other.object == object) && - const DeepCollectionEquality().equals(other._data, _data) && - (identical(other.firstId, firstId) || other.firstId == firstId) && - (identical(other.lastId, lastId) || other.lastId == lastId) && - (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.name, name) || other.name == name) && + (identical(other.usageBytes, usageBytes) || + other.usageBytes == usageBytes) && + (identical(other.fileCounts, fileCounts) || + other.fileCounts == fileCounts) && + (identical(other.status, status) || other.status == status) && + (identical(other.expiresAfter, expiresAfter) || + other.expiresAfter == expiresAfter) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.lastActiveAt, lastActiveAt) || + other.lastActiveAt == lastActiveAt) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, 
includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, object, - const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); + int get hashCode => Object.hash( + runtimeType, + id, + object, + createdAt, + name, + usageBytes, + fileCounts, + status, + expiresAfter, + expiresAt, + lastActiveAt, + const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> - get copyWith => __$$ListVectorStoresResponseImplCopyWithImpl< - _$ListVectorStoresResponseImpl>(this, _$identity); + _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => + __$$VectorStoreObjectImplCopyWithImpl<_$VectorStoreObjectImpl>( + this, _$identity); @override Map toJson() { - return _$$ListVectorStoresResponseImplToJson( + return _$$VectorStoreObjectImplToJson( this, ); } } -abstract class _ListVectorStoresResponse extends ListVectorStoresResponse { - const factory _ListVectorStoresResponse( - {required final String object, - required final List data, - @JsonKey(name: 'first_id') required final String? firstId, - @JsonKey(name: 'last_id') required final String? lastId, - @JsonKey(name: 'has_more') required final bool hasMore}) = - _$ListVectorStoresResponseImpl; - const _ListVectorStoresResponse._() : super._(); +abstract class _VectorStoreObject extends VectorStoreObject { + const factory _VectorStoreObject( + {required final String id, + required final String object, + @JsonKey(name: 'created_at') required final int createdAt, + required final String? name, + @JsonKey(name: 'usage_bytes') required final int usageBytes, + @JsonKey(name: 'file_counts') + required final VectorStoreObjectFileCounts fileCounts, + required final VectorStoreObjectStatus status, + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, + @JsonKey(name: 'last_active_at') required final int? lastActiveAt, + required final dynamic metadata}) = _$VectorStoreObjectImpl; + const _VectorStoreObject._() : super._(); - factory _ListVectorStoresResponse.fromJson(Map json) = - _$ListVectorStoresResponseImpl.fromJson; + factory _VectorStoreObject.fromJson(Map json) = + _$VectorStoreObjectImpl.fromJson; + /// The identifier, which can be referenced in API endpoints. @override + String get id; - /// The object type, which is always `list`. + /// The object type, which is always `vector_store`. + @override String get object; + + /// The Unix timestamp (in seconds) for when the vector store was created. @override + @JsonKey(name: 'created_at') + int get createdAt; - /// A list of assistant files. - List get data; + /// The name of the vector store. @override + String? get name; - /// The ID of the first assistant file in the list. - @JsonKey(name: 'first_id') - String? get firstId; + /// The total number of bytes used by the files in the vector store. @override + @JsonKey(name: 'usage_bytes') + int get usageBytes; - /// The ID of the last assistant file in the list. - @JsonKey(name: 'last_id') - String? get lastId; + /// The number of files in the vector store. @override + @JsonKey(name: 'file_counts') + VectorStoreObjectFileCounts get fileCounts; - /// Whether there are more assistant files available. 
- @JsonKey(name: 'has_more') - bool get hasMore; + /// The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. @override - @JsonKey(ignore: true) - _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> - get copyWith => throw _privateConstructorUsedError; + VectorStoreObjectStatus get status; + + /// The expiration policy for a vector store. + @override + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter; + + /// The Unix timestamp (in seconds) for when the vector store will expire. + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt; + + /// The Unix timestamp (in seconds) for when the vector store was last active. + @override + @JsonKey(name: 'last_active_at') + int? get lastActiveAt; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @override + dynamic get metadata; + + /// Create a copy of VectorStoreObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreObjectImplCopyWith<_$VectorStoreObjectImpl> get copyWith => + throw _privateConstructorUsedError; } -DeleteVectorStoreResponse _$DeleteVectorStoreResponseFromJson( +VectorStoreObjectFileCounts _$VectorStoreObjectFileCountsFromJson( Map json) { - return _DeleteVectorStoreResponse.fromJson(json); + return _VectorStoreObjectFileCounts.fromJson(json); } /// @nodoc -mixin _$DeleteVectorStoreResponse { - /// The ID of the deleted vector store. - String get id => throw _privateConstructorUsedError; +mixin _$VectorStoreObjectFileCounts { + /// The number of files that are currently being processed. + @JsonKey(name: 'in_progress') + int get inProgress => throw _privateConstructorUsedError; - /// Whether the vector store was deleted. - bool get deleted => throw _privateConstructorUsedError; + /// The number of files that have been successfully processed. + int get completed => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.deleted`. - String get object => throw _privateConstructorUsedError; + /// The number of files that have failed to process. + int get failed => throw _privateConstructorUsedError; + + /// The number of files that were cancelled. + int get cancelled => throw _privateConstructorUsedError; + + /// The total number of files. + int get total => throw _privateConstructorUsedError; + /// Serializes this VectorStoreObjectFileCounts to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $DeleteVectorStoreResponseCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreObjectFileCountsCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $DeleteVectorStoreResponseCopyWith<$Res> { - factory $DeleteVectorStoreResponseCopyWith(DeleteVectorStoreResponse value, - $Res Function(DeleteVectorStoreResponse) then) = - _$DeleteVectorStoreResponseCopyWithImpl<$Res, DeleteVectorStoreResponse>; +abstract class $VectorStoreObjectFileCountsCopyWith<$Res> { + factory $VectorStoreObjectFileCountsCopyWith( + VectorStoreObjectFileCounts value, + $Res Function(VectorStoreObjectFileCounts) then) = + _$VectorStoreObjectFileCountsCopyWithImpl<$Res, + VectorStoreObjectFileCounts>; @useResult - $Res call({String id, bool deleted, String object}); + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); } /// @nodoc -class _$DeleteVectorStoreResponseCopyWithImpl<$Res, - $Val extends DeleteVectorStoreResponse> - implements $DeleteVectorStoreResponseCopyWith<$Res> { - _$DeleteVectorStoreResponseCopyWithImpl(this._value, this._then); +class _$VectorStoreObjectFileCountsCopyWithImpl<$Res, + $Val extends VectorStoreObjectFileCounts> + implements $VectorStoreObjectFileCountsCopyWith<$Res> { + _$VectorStoreObjectFileCountsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, - Object? deleted = null, - Object? object = null, + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, }) { return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - deleted: null == deleted - ? _value.deleted - : deleted // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? 
_value.total + : total // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } } /// @nodoc -abstract class _$$DeleteVectorStoreResponseImplCopyWith<$Res> - implements $DeleteVectorStoreResponseCopyWith<$Res> { - factory _$$DeleteVectorStoreResponseImplCopyWith( - _$DeleteVectorStoreResponseImpl value, - $Res Function(_$DeleteVectorStoreResponseImpl) then) = - __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreObjectFileCountsImplCopyWith<$Res> + implements $VectorStoreObjectFileCountsCopyWith<$Res> { + factory _$$VectorStoreObjectFileCountsImplCopyWith( + _$VectorStoreObjectFileCountsImpl value, + $Res Function(_$VectorStoreObjectFileCountsImpl) then) = + __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res>; @override @useResult - $Res call({String id, bool deleted, String object}); + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); } /// @nodoc -class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> - extends _$DeleteVectorStoreResponseCopyWithImpl<$Res, - _$DeleteVectorStoreResponseImpl> - implements _$$DeleteVectorStoreResponseImplCopyWith<$Res> { - __$$DeleteVectorStoreResponseImplCopyWithImpl( - _$DeleteVectorStoreResponseImpl _value, - $Res Function(_$DeleteVectorStoreResponseImpl) _then) +class __$$VectorStoreObjectFileCountsImplCopyWithImpl<$Res> + extends _$VectorStoreObjectFileCountsCopyWithImpl<$Res, + _$VectorStoreObjectFileCountsImpl> + implements _$$VectorStoreObjectFileCountsImplCopyWith<$Res> { + __$$VectorStoreObjectFileCountsImplCopyWithImpl( + _$VectorStoreObjectFileCountsImpl _value, + $Res Function(_$VectorStoreObjectFileCountsImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, - Object? deleted = null, - Object? object = null, + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, }) { - return _then(_$DeleteVectorStoreResponseImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - deleted: null == deleted - ? _value.deleted - : deleted // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$VectorStoreObjectFileCountsImpl( + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? 
_value.total + : total // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$DeleteVectorStoreResponseImpl extends _DeleteVectorStoreResponse { - const _$DeleteVectorStoreResponseImpl( - {required this.id, required this.deleted, required this.object}) +class _$VectorStoreObjectFileCountsImpl extends _VectorStoreObjectFileCounts { + const _$VectorStoreObjectFileCountsImpl( + {@JsonKey(name: 'in_progress') required this.inProgress, + required this.completed, + required this.failed, + required this.cancelled, + required this.total}) : super._(); - factory _$DeleteVectorStoreResponseImpl.fromJson(Map json) => - _$$DeleteVectorStoreResponseImplFromJson(json); + factory _$VectorStoreObjectFileCountsImpl.fromJson( + Map json) => + _$$VectorStoreObjectFileCountsImplFromJson(json); - /// The ID of the deleted vector store. + /// The number of files that are currently being processed. @override - final String id; + @JsonKey(name: 'in_progress') + final int inProgress; - /// Whether the vector store was deleted. + /// The number of files that have been successfully processed. @override - final bool deleted; + final int completed; - /// The object type, which is always `vector_store.deleted`. + /// The number of files that have failed to process. @override - final String object; + final int failed; + + /// The number of files that were cancelled. + @override + final int cancelled; + + /// The total number of files. + @override + final int total; @override String toString() { - return 'DeleteVectorStoreResponse(id: $id, deleted: $deleted, object: $object)'; + return 'VectorStoreObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$DeleteVectorStoreResponseImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.deleted, deleted) || other.deleted == deleted) && - (identical(other.object, object) || other.object == object)); + other is _$VectorStoreObjectFileCountsImpl && + (identical(other.inProgress, inProgress) || + other.inProgress == inProgress) && + (identical(other.completed, completed) || + other.completed == completed) && + (identical(other.failed, failed) || other.failed == failed) && + (identical(other.cancelled, cancelled) || + other.cancelled == cancelled) && + (identical(other.total, total) || other.total == total)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, id, deleted, object); + int get hashCode => + Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> - get copyWith => __$$DeleteVectorStoreResponseImplCopyWithImpl< - _$DeleteVectorStoreResponseImpl>(this, _$identity); + _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> + get copyWith => __$$VectorStoreObjectFileCountsImplCopyWithImpl< + _$VectorStoreObjectFileCountsImpl>(this, _$identity); @override Map toJson() { - return _$$DeleteVectorStoreResponseImplToJson( + return _$$VectorStoreObjectFileCountsImplToJson( this, ); } } -abstract class _DeleteVectorStoreResponse extends DeleteVectorStoreResponse { - const factory _DeleteVectorStoreResponse( - {required final String id, - required final bool deleted, - required final String object}) = _$DeleteVectorStoreResponseImpl; - const _DeleteVectorStoreResponse._() : super._(); +abstract class _VectorStoreObjectFileCounts + extends VectorStoreObjectFileCounts { + const factory _VectorStoreObjectFileCounts( + {@JsonKey(name: 'in_progress') required final int inProgress, + required final int completed, + required final int failed, + required final int cancelled, + required final int total}) = _$VectorStoreObjectFileCountsImpl; + const _VectorStoreObjectFileCounts._() : super._(); - factory _DeleteVectorStoreResponse.fromJson(Map json) = - _$DeleteVectorStoreResponseImpl.fromJson; + factory _VectorStoreObjectFileCounts.fromJson(Map json) = + _$VectorStoreObjectFileCountsImpl.fromJson; + /// The number of files that are currently being processed. @override + @JsonKey(name: 'in_progress') + int get inProgress; - /// The ID of the deleted vector store. - String get id; + /// The number of files that have been successfully processed. @override + int get completed; - /// Whether the vector store was deleted. - bool get deleted; + /// The number of files that have failed to process. @override + int get failed; - /// The object type, which is always `vector_store.deleted`. - String get object; + /// The number of files that were cancelled. @override - @JsonKey(ignore: true) - _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> + int get cancelled; + + /// The total number of files. + @override + int get total; + + /// Create a copy of VectorStoreObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreObjectFileCountsImplCopyWith<_$VectorStoreObjectFileCountsImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreFileObject _$VectorStoreFileObjectFromJson( +CreateVectorStoreRequest _$CreateVectorStoreRequestFromJson( Map json) { - return _VectorStoreFileObject.fromJson(json); + return _CreateVectorStoreRequest.fromJson(json); } /// @nodoc -mixin _$VectorStoreFileObject { - /// The identifier, which can be referenced in API endpoints. - String get id => throw _privateConstructorUsedError; - - /// The object type, which is always `vector_store.file`. - String get object => throw _privateConstructorUsedError; - - /// The total vector store usage in bytes. Note that this may be different from the original file size. - @JsonKey(name: 'usage_bytes') - int get usageBytes => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the vector store file was created. 
- @JsonKey(name: 'created_at') - int get createdAt => throw _privateConstructorUsedError; +mixin _$CreateVectorStoreRequest { + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; - /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. - @JsonKey(name: 'vector_store_id') - String get vectorStoreId => throw _privateConstructorUsedError; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_ids', includeIfNull: false) + List? get fileIds => throw _privateConstructorUsedError; - /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - VectorStoreFileStatus get status => throw _privateConstructorUsedError; + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter => + throw _privateConstructorUsedError; - /// The last error associated with this vector store file. Will be `null` if there are no errors. - @JsonKey(name: 'last_error') - VectorStoreFileObjectLastError? get lastError => + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata => throw _privateConstructorUsedError; + + /// Serializes this CreateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreFileObjectCopyWith get copyWith => + + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CreateVectorStoreRequestCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileObjectCopyWith<$Res> { - factory $VectorStoreFileObjectCopyWith(VectorStoreFileObject value, - $Res Function(VectorStoreFileObject) then) = - _$VectorStoreFileObjectCopyWithImpl<$Res, VectorStoreFileObject>; +abstract class $CreateVectorStoreRequestCopyWith<$Res> { + factory $CreateVectorStoreRequestCopyWith(CreateVectorStoreRequest value, + $Res Function(CreateVectorStoreRequest) then) = + _$CreateVectorStoreRequestCopyWithImpl<$Res, CreateVectorStoreRequest>; @useResult $Res call( - {String id, - String object, - @JsonKey(name: 'usage_bytes') int usageBytes, - @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError}); + {@JsonKey(includeIfNull: false) String? 
name, + @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + @JsonKey(includeIfNull: false) dynamic metadata}); - $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc -class _$VectorStoreFileObjectCopyWithImpl<$Res, - $Val extends VectorStoreFileObject> - implements $VectorStoreFileObjectCopyWith<$Res> { - _$VectorStoreFileObjectCopyWithImpl(this._value, this._then); +class _$CreateVectorStoreRequestCopyWithImpl<$Res, + $Val extends CreateVectorStoreRequest> + implements $CreateVectorStoreRequestCopyWith<$Res> { + _$CreateVectorStoreRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, - Object? object = null, - Object? usageBytes = null, - Object? createdAt = null, - Object? vectorStoreId = null, - Object? status = null, - Object? lastError = freezed, + Object? name = freezed, + Object? fileIds = freezed, + Object? expiresAfter = freezed, + Object? chunkingStrategy = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - usageBytes: null == usageBytes - ? _value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - vectorStoreId: null == vectorStoreId - ? _value.vectorStoreId - : vectorStoreId // ignore: cast_nullable_to_non_nullable - as String, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileStatus, - lastError: freezed == lastError - ? _value.lastError - : lastError // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastError?, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + fileIds: freezed == fileIds + ? _value.fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List?, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, ) as $Val); } + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError { - if (_value.lastError == null) { + $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter { + if (_value.expiresAfter == null) { return null; } - return $VectorStoreFileObjectLastErrorCopyWith<$Res>(_value.lastError!, + return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, (value) { - return _then(_value.copyWith(lastError: value) as $Val); + return _then(_value.copyWith(expiresAfter: value) as $Val); + }); + } + + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); }); } } /// @nodoc -abstract class _$$VectorStoreFileObjectImplCopyWith<$Res> - implements $VectorStoreFileObjectCopyWith<$Res> { - factory _$$VectorStoreFileObjectImplCopyWith( - _$VectorStoreFileObjectImpl value, - $Res Function(_$VectorStoreFileObjectImpl) then) = - __$$VectorStoreFileObjectImplCopyWithImpl<$Res>; +abstract class _$$CreateVectorStoreRequestImplCopyWith<$Res> + implements $CreateVectorStoreRequestCopyWith<$Res> { + factory _$$CreateVectorStoreRequestImplCopyWith( + _$CreateVectorStoreRequestImpl value, + $Res Function(_$CreateVectorStoreRequestImpl) then) = + __$$CreateVectorStoreRequestImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String id, - String object, - @JsonKey(name: 'usage_bytes') int usageBytes, - @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileStatus status, - @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError}); + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + @JsonKey(includeIfNull: false) dynamic metadata}); @override - $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter; + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc -class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> - extends _$VectorStoreFileObjectCopyWithImpl<$Res, - _$VectorStoreFileObjectImpl> - implements _$$VectorStoreFileObjectImplCopyWith<$Res> { - __$$VectorStoreFileObjectImplCopyWithImpl(_$VectorStoreFileObjectImpl _value, - $Res Function(_$VectorStoreFileObjectImpl) _then) +class __$$CreateVectorStoreRequestImplCopyWithImpl<$Res> + extends _$CreateVectorStoreRequestCopyWithImpl<$Res, + _$CreateVectorStoreRequestImpl> + implements _$$CreateVectorStoreRequestImplCopyWith<$Res> { + __$$CreateVectorStoreRequestImplCopyWithImpl( + _$CreateVectorStoreRequestImpl _value, + $Res Function(_$CreateVectorStoreRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, - Object? object = null, - Object? usageBytes = null, - Object? createdAt = null, - Object? vectorStoreId = null, - Object? status = null, - Object? lastError = freezed, + Object? name = freezed, + Object? fileIds = freezed, + Object? expiresAfter = freezed, + Object? 
chunkingStrategy = freezed, + Object? metadata = freezed, }) { - return _then(_$VectorStoreFileObjectImpl( - id: null == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - usageBytes: null == usageBytes - ? _value.usageBytes - : usageBytes // ignore: cast_nullable_to_non_nullable - as int, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - vectorStoreId: null == vectorStoreId - ? _value.vectorStoreId - : vectorStoreId // ignore: cast_nullable_to_non_nullable - as String, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileStatus, - lastError: freezed == lastError - ? _value.lastError - : lastError // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastError?, + return _then(_$CreateVectorStoreRequestImpl( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + fileIds: freezed == fileIds + ? _value._fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List?, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { - const _$VectorStoreFileObjectImpl( - {required this.id, - required this.object, - @JsonKey(name: 'usage_bytes') required this.usageBytes, - @JsonKey(name: 'created_at') required this.createdAt, - @JsonKey(name: 'vector_store_id') required this.vectorStoreId, - required this.status, - @JsonKey(name: 'last_error') required this.lastError}) - : super._(); - - factory _$VectorStoreFileObjectImpl.fromJson(Map json) => - _$$VectorStoreFileObjectImplFromJson(json); +class _$CreateVectorStoreRequestImpl extends _CreateVectorStoreRequest { + const _$CreateVectorStoreRequestImpl( + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'file_ids', includeIfNull: false) + final List? fileIds, + @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy, + @JsonKey(includeIfNull: false) this.metadata}) + : _fileIds = fileIds, + super._(); - /// The identifier, which can be referenced in API endpoints. - @override - final String id; + factory _$CreateVectorStoreRequestImpl.fromJson(Map json) => + _$$CreateVectorStoreRequestImplFromJson(json); - /// The object type, which is always `vector_store.file`. + /// The name of the vector store. @override - final String object; + @JsonKey(includeIfNull: false) + final String? name; - /// The total vector store usage in bytes. Note that this may be different from the original file size. - @override - @JsonKey(name: 'usage_bytes') - final int usageBytes; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + final List? 
_fileIds; - /// The Unix timestamp (in seconds) for when the vector store file was created. + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @override - @JsonKey(name: 'created_at') - final int createdAt; + @JsonKey(name: 'file_ids', includeIfNull: false) + List? get fileIds { + final value = _fileIds; + if (value == null) return null; + if (_fileIds is EqualUnmodifiableListView) return _fileIds; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } - /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + /// The expiration policy for a vector store. @override - @JsonKey(name: 'vector_store_id') - final String vectorStoreId; + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter; - /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] @override - final VectorStoreFileStatus status; + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy; - /// The last error associated with this vector store file. Will be `null` if there are no errors. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - @JsonKey(name: 'last_error') - final VectorStoreFileObjectLastError? 
lastError; + @JsonKey(includeIfNull: false) + final dynamic metadata; @override String toString() { - return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError)'; + return 'CreateVectorStoreRequest(name: $name, fileIds: $fileIds, expiresAfter: $expiresAfter, chunkingStrategy: $chunkingStrategy, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileObjectImpl && - (identical(other.id, id) || other.id == id) && - (identical(other.object, object) || other.object == object) && - (identical(other.usageBytes, usageBytes) || - other.usageBytes == usageBytes) && - (identical(other.createdAt, createdAt) || - other.createdAt == createdAt) && - (identical(other.vectorStoreId, vectorStoreId) || - other.vectorStoreId == vectorStoreId) && - (identical(other.status, status) || other.status == status) && - (identical(other.lastError, lastError) || - other.lastError == lastError)); + other is _$CreateVectorStoreRequestImpl && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.expiresAfter, expiresAfter) || + other.expiresAfter == expiresAfter) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, id, object, usageBytes, - createdAt, vectorStoreId, status, lastError); + int get hashCode => Object.hash( + runtimeType, + name, + const DeepCollectionEquality().hash(_fileIds), + expiresAfter, + chunkingStrategy, + const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> - get copyWith => __$$VectorStoreFileObjectImplCopyWithImpl< - _$VectorStoreFileObjectImpl>(this, _$identity); + _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> + get copyWith => __$$CreateVectorStoreRequestImplCopyWithImpl< + _$CreateVectorStoreRequestImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileObjectImplToJson( + return _$$CreateVectorStoreRequestImplToJson( this, ); } } -abstract class _VectorStoreFileObject extends VectorStoreFileObject { - const factory _VectorStoreFileObject( - {required final String id, - required final String object, - @JsonKey(name: 'usage_bytes') required final int usageBytes, - @JsonKey(name: 'created_at') required final int createdAt, - @JsonKey(name: 'vector_store_id') required final String vectorStoreId, - required final VectorStoreFileStatus status, - @JsonKey(name: 'last_error') - required final VectorStoreFileObjectLastError? 
lastError}) = - _$VectorStoreFileObjectImpl; - const _VectorStoreFileObject._() : super._(); - - factory _VectorStoreFileObject.fromJson(Map json) = - _$VectorStoreFileObjectImpl.fromJson; - - @override +abstract class _CreateVectorStoreRequest extends CreateVectorStoreRequest { + const factory _CreateVectorStoreRequest( + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'file_ids', includeIfNull: false) + final List? fileIds, + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy, + @JsonKey(includeIfNull: false) final dynamic metadata}) = + _$CreateVectorStoreRequestImpl; + const _CreateVectorStoreRequest._() : super._(); - /// The identifier, which can be referenced in API endpoints. - String get id; - @override + factory _CreateVectorStoreRequest.fromJson(Map json) = + _$CreateVectorStoreRequestImpl.fromJson; - /// The object type, which is always `vector_store.file`. - String get object; + /// The name of the vector store. @override + @JsonKey(includeIfNull: false) + String? get name; - /// The total vector store usage in bytes. Note that this may be different from the original file size. - @JsonKey(name: 'usage_bytes') - int get usageBytes; + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. @override + @JsonKey(name: 'file_ids', includeIfNull: false) + List? get fileIds; - /// The Unix timestamp (in seconds) for when the vector store file was created. - @JsonKey(name: 'created_at') - int get createdAt; + /// The expiration policy for a vector store. @override + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter; - /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. - @JsonKey(name: 'vector_store_id') - String get vectorStoreId; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; - /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - VectorStoreFileStatus get status; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override + @JsonKey(includeIfNull: false) + dynamic get metadata; - /// The last error associated with this vector store file. Will be `null` if there are no errors. - @JsonKey(name: 'last_error') - VectorStoreFileObjectLastError? get lastError; + /// Create a copy of CreateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateVectorStoreRequestImplCopyWith<_$CreateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreFileObjectLastError _$VectorStoreFileObjectLastErrorFromJson( +UpdateVectorStoreRequest _$UpdateVectorStoreRequestFromJson( Map json) { - return _VectorStoreFileObjectLastError.fromJson(json); + return _UpdateVectorStoreRequest.fromJson(json); } /// @nodoc -mixin _$VectorStoreFileObjectLastError { - /// One of `server_error` or `rate_limit_exceeded`. - VectorStoreFileObjectLastErrorCode get code => +mixin _$UpdateVectorStoreRequest { + /// The name of the vector store. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + /// The expiration policy for a vector store. + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter => throw _privateConstructorUsedError; - /// A human-readable description of the error. - String get message => throw _privateConstructorUsedError; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata => throw _privateConstructorUsedError; + /// Serializes this UpdateVectorStoreRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreFileObjectLastErrorCopyWith - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $UpdateVectorStoreRequestCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileObjectLastErrorCopyWith<$Res> { - factory $VectorStoreFileObjectLastErrorCopyWith( - VectorStoreFileObjectLastError value, - $Res Function(VectorStoreFileObjectLastError) then) = - _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, - VectorStoreFileObjectLastError>; +abstract class $UpdateVectorStoreRequestCopyWith<$Res> { + factory $UpdateVectorStoreRequestCopyWith(UpdateVectorStoreRequest value, + $Res Function(UpdateVectorStoreRequest) then) = + _$UpdateVectorStoreRequestCopyWithImpl<$Res, UpdateVectorStoreRequest>; @useResult - $Res call({VectorStoreFileObjectLastErrorCode code, String message}); + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) dynamic metadata}); + + $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter; } /// @nodoc -class _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, - $Val extends VectorStoreFileObjectLastError> - implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { - _$VectorStoreFileObjectLastErrorCopyWithImpl(this._value, this._then); +class _$UpdateVectorStoreRequestCopyWithImpl<$Res, + $Val extends UpdateVectorStoreRequest> + implements $UpdateVectorStoreRequestCopyWith<$Res> { + _$UpdateVectorStoreRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = null, - Object? message = null, + Object? name = freezed, + Object? expiresAfter = freezed, + Object? metadata = freezed, }) { return _then(_value.copyWith( - code: null == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastErrorCode, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, ) as $Val); } + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $VectorStoreExpirationAfterCopyWith<$Res>? get expiresAfter { + if (_value.expiresAfter == null) { + return null; + } + + return $VectorStoreExpirationAfterCopyWith<$Res>(_value.expiresAfter!, + (value) { + return _then(_value.copyWith(expiresAfter: value) as $Val); + }); + } } /// @nodoc -abstract class _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> - implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { - factory _$$VectorStoreFileObjectLastErrorImplCopyWith( - _$VectorStoreFileObjectLastErrorImpl value, - $Res Function(_$VectorStoreFileObjectLastErrorImpl) then) = - __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res>; +abstract class _$$UpdateVectorStoreRequestImplCopyWith<$Res> + implements $UpdateVectorStoreRequestCopyWith<$Res> { + factory _$$UpdateVectorStoreRequestImplCopyWith( + _$UpdateVectorStoreRequestImpl value, + $Res Function(_$UpdateVectorStoreRequestImpl) then) = + __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({VectorStoreFileObjectLastErrorCode code, String message}); + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $VectorStoreExpirationAfterCopyWith<$Res>? 
get expiresAfter; } /// @nodoc -class __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res> - extends _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, - _$VectorStoreFileObjectLastErrorImpl> - implements _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> { - __$$VectorStoreFileObjectLastErrorImplCopyWithImpl( - _$VectorStoreFileObjectLastErrorImpl _value, - $Res Function(_$VectorStoreFileObjectLastErrorImpl) _then) +class __$$UpdateVectorStoreRequestImplCopyWithImpl<$Res> + extends _$UpdateVectorStoreRequestCopyWithImpl<$Res, + _$UpdateVectorStoreRequestImpl> + implements _$$UpdateVectorStoreRequestImplCopyWith<$Res> { + __$$UpdateVectorStoreRequestImplCopyWithImpl( + _$UpdateVectorStoreRequestImpl _value, + $Res Function(_$UpdateVectorStoreRequestImpl) _then) : super(_value, _then); + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = null, - Object? message = null, + Object? name = freezed, + Object? expiresAfter = freezed, + Object? metadata = freezed, }) { - return _then(_$VectorStoreFileObjectLastErrorImpl( - code: null == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as VectorStoreFileObjectLastErrorCode, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$UpdateVectorStoreRequestImpl( + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + expiresAfter: freezed == expiresAfter + ? _value.expiresAfter + : expiresAfter // ignore: cast_nullable_to_non_nullable + as VectorStoreExpirationAfter?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileObjectLastErrorImpl - extends _VectorStoreFileObjectLastError { - const _$VectorStoreFileObjectLastErrorImpl( - {required this.code, required this.message}) +class _$UpdateVectorStoreRequestImpl extends _UpdateVectorStoreRequest { + const _$UpdateVectorStoreRequestImpl( + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'expires_after', includeIfNull: false) this.expiresAfter, + @JsonKey(includeIfNull: false) this.metadata}) : super._(); - factory _$VectorStoreFileObjectLastErrorImpl.fromJson( - Map json) => - _$$VectorStoreFileObjectLastErrorImplFromJson(json); + factory _$UpdateVectorStoreRequestImpl.fromJson(Map json) => + _$$UpdateVectorStoreRequestImplFromJson(json); - /// One of `server_error` or `rate_limit_exceeded`. + /// The name of the vector store. @override - final VectorStoreFileObjectLastErrorCode code; + @JsonKey(includeIfNull: false) + final String? name; - /// A human-readable description of the error. + /// The expiration policy for a vector store. @override - final String message; + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. 
+ @override + @JsonKey(includeIfNull: false) + final dynamic metadata; @override String toString() { - return 'VectorStoreFileObjectLastError(code: $code, message: $message)'; + return 'UpdateVectorStoreRequest(name: $name, expiresAfter: $expiresAfter, metadata: $metadata)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileObjectLastErrorImpl && - (identical(other.code, code) || other.code == code) && - (identical(other.message, message) || other.message == message)); + other is _$UpdateVectorStoreRequestImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.expiresAfter, expiresAfter) || + other.expiresAfter == expiresAfter) && + const DeepCollectionEquality().equals(other.metadata, metadata)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, code, message); + int get hashCode => Object.hash(runtimeType, name, expiresAfter, + const DeepCollectionEquality().hash(metadata)); - @JsonKey(ignore: true) + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileObjectLastErrorImplCopyWith< - _$VectorStoreFileObjectLastErrorImpl> - get copyWith => __$$VectorStoreFileObjectLastErrorImplCopyWithImpl< - _$VectorStoreFileObjectLastErrorImpl>(this, _$identity); + _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> + get copyWith => __$$UpdateVectorStoreRequestImplCopyWithImpl< + _$UpdateVectorStoreRequestImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileObjectLastErrorImplToJson( + return _$$UpdateVectorStoreRequestImplToJson( this, ); } } -abstract class _VectorStoreFileObjectLastError - extends VectorStoreFileObjectLastError { - const factory _VectorStoreFileObjectLastError( - {required final VectorStoreFileObjectLastErrorCode code, - required final String message}) = _$VectorStoreFileObjectLastErrorImpl; - const _VectorStoreFileObjectLastError._() : super._(); +abstract class _UpdateVectorStoreRequest extends UpdateVectorStoreRequest { + const factory _UpdateVectorStoreRequest( + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'expires_after', includeIfNull: false) + final VectorStoreExpirationAfter? expiresAfter, + @JsonKey(includeIfNull: false) final dynamic metadata}) = + _$UpdateVectorStoreRequestImpl; + const _UpdateVectorStoreRequest._() : super._(); - factory _VectorStoreFileObjectLastError.fromJson(Map json) = - _$VectorStoreFileObjectLastErrorImpl.fromJson; + factory _UpdateVectorStoreRequest.fromJson(Map json) = + _$UpdateVectorStoreRequestImpl.fromJson; + /// The name of the vector store. @override + @JsonKey(includeIfNull: false) + String? get name; - /// One of `server_error` or `rate_limit_exceeded`. - VectorStoreFileObjectLastErrorCode get code; + /// The expiration policy for a vector store. @override + @JsonKey(name: 'expires_after', includeIfNull: false) + VectorStoreExpirationAfter? get expiresAfter; - /// A human-readable description of the error. - String get message; + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @override - @JsonKey(ignore: true) - _$$VectorStoreFileObjectLastErrorImplCopyWith< - _$VectorStoreFileObjectLastErrorImpl> + @JsonKey(includeIfNull: false) + dynamic get metadata; + + /// Create a copy of UpdateVectorStoreRequest + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$UpdateVectorStoreRequestImplCopyWith<_$UpdateVectorStoreRequestImpl> get copyWith => throw _privateConstructorUsedError; } -CreateVectorStoreFileRequest _$CreateVectorStoreFileRequestFromJson( +ListVectorStoresResponse _$ListVectorStoresResponseFromJson( Map json) { - return _CreateVectorStoreFileRequest.fromJson(json); + return _ListVectorStoresResponse.fromJson(json); } /// @nodoc -mixin _$CreateVectorStoreFileRequest { - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_id') - String get fileId => throw _privateConstructorUsedError; +mixin _$ListVectorStoresResponse { + /// The object type, which is always `list`. + String get object => throw _privateConstructorUsedError; + + /// A list of assistant files. + List get data => throw _privateConstructorUsedError; + + /// The ID of the first assistant file in the list. + @JsonKey(name: 'first_id') + String? get firstId => throw _privateConstructorUsedError; + + /// The ID of the last assistant file in the list. + @JsonKey(name: 'last_id') + String? get lastId => throw _privateConstructorUsedError; + + /// Whether there are more assistant files available. + @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListVectorStoresResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateVectorStoreFileRequestCopyWith - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ListVectorStoresResponseCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateVectorStoreFileRequestCopyWith<$Res> { - factory $CreateVectorStoreFileRequestCopyWith( - CreateVectorStoreFileRequest value, - $Res Function(CreateVectorStoreFileRequest) then) = - _$CreateVectorStoreFileRequestCopyWithImpl<$Res, - CreateVectorStoreFileRequest>; +abstract class $ListVectorStoresResponseCopyWith<$Res> { + factory $ListVectorStoresResponseCopyWith(ListVectorStoresResponse value, + $Res Function(ListVectorStoresResponse) then) = + _$ListVectorStoresResponseCopyWithImpl<$Res, ListVectorStoresResponse>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call( + {String object, + List data, + @JsonKey(name: 'first_id') String? firstId, + @JsonKey(name: 'last_id') String? 
lastId, + @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, - $Val extends CreateVectorStoreFileRequest> - implements $CreateVectorStoreFileRequestCopyWith<$Res> { - _$CreateVectorStoreFileRequestCopyWithImpl(this._value, this._then); +class _$ListVectorStoresResponseCopyWithImpl<$Res, + $Val extends ListVectorStoresResponse> + implements $ListVectorStoresResponseCopyWith<$Res> { + _$ListVectorStoresResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, + Object? object = null, + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, }) { return _then(_value.copyWith( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable as String, + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, ) as $Val); } } /// @nodoc -abstract class _$$CreateVectorStoreFileRequestImplCopyWith<$Res> - implements $CreateVectorStoreFileRequestCopyWith<$Res> { - factory _$$CreateVectorStoreFileRequestImplCopyWith( - _$CreateVectorStoreFileRequestImpl value, - $Res Function(_$CreateVectorStoreFileRequestImpl) then) = - __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res>; - @override - @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); -} - -/// @nodoc -class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> - extends _$CreateVectorStoreFileRequestCopyWithImpl<$Res, - _$CreateVectorStoreFileRequestImpl> - implements _$$CreateVectorStoreFileRequestImplCopyWith<$Res> { - __$$CreateVectorStoreFileRequestImplCopyWithImpl( - _$CreateVectorStoreFileRequestImpl _value, - $Res Function(_$CreateVectorStoreFileRequestImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? fileId = null, - }) { - return _then(_$CreateVectorStoreFileRequestImpl( - fileId: null == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable - as String, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { - const _$CreateVectorStoreFileRequestImpl( - {@JsonKey(name: 'file_id') required this.fileId}) - : super._(); - - factory _$CreateVectorStoreFileRequestImpl.fromJson( - Map json) => - _$$CreateVectorStoreFileRequestImplFromJson(json); - - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
- @override - @JsonKey(name: 'file_id') - final String fileId; - - @override - String toString() { - return 'CreateVectorStoreFileRequest(fileId: $fileId)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$CreateVectorStoreFileRequestImpl && - (identical(other.fileId, fileId) || other.fileId == fileId)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, fileId); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$CreateVectorStoreFileRequestImplCopyWith< - _$CreateVectorStoreFileRequestImpl> - get copyWith => __$$CreateVectorStoreFileRequestImplCopyWithImpl< - _$CreateVectorStoreFileRequestImpl>(this, _$identity); - - @override - Map toJson() { - return _$$CreateVectorStoreFileRequestImplToJson( - this, - ); - } -} - -abstract class _CreateVectorStoreFileRequest - extends CreateVectorStoreFileRequest { - const factory _CreateVectorStoreFileRequest( - {@JsonKey(name: 'file_id') required final String fileId}) = - _$CreateVectorStoreFileRequestImpl; - const _CreateVectorStoreFileRequest._() : super._(); - - factory _CreateVectorStoreFileRequest.fromJson(Map json) = - _$CreateVectorStoreFileRequestImpl.fromJson; - - @override - - /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_id') - String get fileId; - @override - @JsonKey(ignore: true) - _$$CreateVectorStoreFileRequestImplCopyWith< - _$CreateVectorStoreFileRequestImpl> - get copyWith => throw _privateConstructorUsedError; -} - -ListVectorStoreFilesResponse _$ListVectorStoreFilesResponseFromJson( - Map json) { - return _ListVectorStoreFilesResponse.fromJson(json); -} - -/// @nodoc -mixin _$ListVectorStoreFilesResponse { - /// The object type, which is always `list`. - String get object => throw _privateConstructorUsedError; - - /// A list of message files. - List get data => throw _privateConstructorUsedError; - - /// The ID of the first message file in the list. - @JsonKey(name: 'first_id') - String get firstId => throw _privateConstructorUsedError; - - /// The ID of the last message file in the list. - @JsonKey(name: 'last_id') - String get lastId => throw _privateConstructorUsedError; - - /// Whether there are more message files available. 
- @JsonKey(name: 'has_more') - bool get hasMore => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ListVectorStoreFilesResponseCopyWith - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ListVectorStoreFilesResponseCopyWith<$Res> { - factory $ListVectorStoreFilesResponseCopyWith( - ListVectorStoreFilesResponse value, - $Res Function(ListVectorStoreFilesResponse) then) = - _$ListVectorStoreFilesResponseCopyWithImpl<$Res, - ListVectorStoreFilesResponse>; - @useResult - $Res call( - {String object, - List data, - @JsonKey(name: 'first_id') String firstId, - @JsonKey(name: 'last_id') String lastId, - @JsonKey(name: 'has_more') bool hasMore}); -} - -/// @nodoc -class _$ListVectorStoreFilesResponseCopyWithImpl<$Res, - $Val extends ListVectorStoreFilesResponse> - implements $ListVectorStoreFilesResponseCopyWith<$Res> { - _$ListVectorStoreFilesResponseCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? object = null, - Object? data = null, - Object? firstId = null, - Object? lastId = null, - Object? hasMore = null, - }) { - return _then(_value.copyWith( - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as String, - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: null == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String, - lastId: null == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable - as String, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$ListVectorStoreFilesResponseImplCopyWith<$Res> - implements $ListVectorStoreFilesResponseCopyWith<$Res> { - factory _$$ListVectorStoreFilesResponseImplCopyWith( - _$ListVectorStoreFilesResponseImpl value, - $Res Function(_$ListVectorStoreFilesResponseImpl) then) = - __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res>; +abstract class _$$ListVectorStoresResponseImplCopyWith<$Res> + implements $ListVectorStoresResponseCopyWith<$Res> { + factory _$$ListVectorStoresResponseImplCopyWith( + _$ListVectorStoresResponseImpl value, + $Res Function(_$ListVectorStoresResponseImpl) then) = + __$$ListVectorStoresResponseImplCopyWithImpl<$Res>; @override @useResult $Res call( {String object, - List data, - @JsonKey(name: 'first_id') String firstId, - @JsonKey(name: 'last_id') String lastId, + List data, + @JsonKey(name: 'first_id') String? firstId, + @JsonKey(name: 'last_id') String? 
lastId, @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> - extends _$ListVectorStoreFilesResponseCopyWithImpl<$Res, - _$ListVectorStoreFilesResponseImpl> - implements _$$ListVectorStoreFilesResponseImplCopyWith<$Res> { - __$$ListVectorStoreFilesResponseImplCopyWithImpl( - _$ListVectorStoreFilesResponseImpl _value, - $Res Function(_$ListVectorStoreFilesResponseImpl) _then) +class __$$ListVectorStoresResponseImplCopyWithImpl<$Res> + extends _$ListVectorStoresResponseCopyWithImpl<$Res, + _$ListVectorStoresResponseImpl> + implements _$$ListVectorStoresResponseImplCopyWith<$Res> { + __$$ListVectorStoresResponseImplCopyWithImpl( + _$ListVectorStoresResponseImpl _value, + $Res Function(_$ListVectorStoresResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? object = null, Object? data = null, - Object? firstId = null, - Object? lastId = null, + Object? firstId = freezed, + Object? lastId = freezed, Object? hasMore = null, }) { - return _then(_$ListVectorStoreFilesResponseImpl( + return _then(_$ListVectorStoresResponseImpl( object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable @@ -46724,15 +49921,15 @@ class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> data: null == data ? _value._data : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: null == firstId + as List, + firstId: freezed == firstId ? _value.firstId : firstId // ignore: cast_nullable_to_non_nullable - as String, - lastId: null == lastId + as String?, + lastId: freezed == lastId ? _value.lastId : lastId // ignore: cast_nullable_to_non_nullable - as String, + as String?, hasMore: null == hasMore ? _value.hasMore : hasMore // ignore: cast_nullable_to_non_nullable @@ -46743,60 +49940,59 @@ class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { - const _$ListVectorStoreFilesResponseImpl( +class _$ListVectorStoresResponseImpl extends _ListVectorStoresResponse { + const _$ListVectorStoresResponseImpl( {required this.object, - required final List data, + required final List data, @JsonKey(name: 'first_id') required this.firstId, @JsonKey(name: 'last_id') required this.lastId, @JsonKey(name: 'has_more') required this.hasMore}) : _data = data, super._(); - factory _$ListVectorStoreFilesResponseImpl.fromJson( - Map json) => - _$$ListVectorStoreFilesResponseImplFromJson(json); + factory _$ListVectorStoresResponseImpl.fromJson(Map json) => + _$$ListVectorStoresResponseImplFromJson(json); /// The object type, which is always `list`. @override final String object; - /// A list of message files. - final List _data; + /// A list of assistant files. + final List _data; - /// A list of message files. + /// A list of assistant files. @override - List get data { + List get data { if (_data is EqualUnmodifiableListView) return _data; // ignore: implicit_dynamic_type return EqualUnmodifiableListView(_data); } - /// The ID of the first message file in the list. + /// The ID of the first assistant file in the list. @override @JsonKey(name: 'first_id') - final String firstId; + final String? firstId; - /// The ID of the last message file in the list. + /// The ID of the last assistant file in the list. 
@override @JsonKey(name: 'last_id') - final String lastId; + final String? lastId; - /// Whether there are more message files available. + /// Whether there are more assistant files available. @override @JsonKey(name: 'has_more') final bool hasMore; @override String toString() { - return 'ListVectorStoreFilesResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; + return 'ListVectorStoresResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ListVectorStoreFilesResponseImpl && + other is _$ListVectorStoresResponseImpl && (identical(other.object, object) || other.object == object) && const DeepCollectionEquality().equals(other._data, _data) && (identical(other.firstId, firstId) || other.firstId == firstId) && @@ -46804,115 +50000,120 @@ class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, object, const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ListVectorStoreFilesResponseImplCopyWith< - _$ListVectorStoreFilesResponseImpl> - get copyWith => __$$ListVectorStoreFilesResponseImplCopyWithImpl< - _$ListVectorStoreFilesResponseImpl>(this, _$identity); + _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> + get copyWith => __$$ListVectorStoresResponseImplCopyWithImpl< + _$ListVectorStoresResponseImpl>(this, _$identity); @override Map toJson() { - return _$$ListVectorStoreFilesResponseImplToJson( + return _$$ListVectorStoresResponseImplToJson( this, ); } } -abstract class _ListVectorStoreFilesResponse - extends ListVectorStoreFilesResponse { - const factory _ListVectorStoreFilesResponse( +abstract class _ListVectorStoresResponse extends ListVectorStoresResponse { + const factory _ListVectorStoresResponse( {required final String object, - required final List data, - @JsonKey(name: 'first_id') required final String firstId, - @JsonKey(name: 'last_id') required final String lastId, + required final List data, + @JsonKey(name: 'first_id') required final String? firstId, + @JsonKey(name: 'last_id') required final String? lastId, @JsonKey(name: 'has_more') required final bool hasMore}) = - _$ListVectorStoreFilesResponseImpl; - const _ListVectorStoreFilesResponse._() : super._(); - - factory _ListVectorStoreFilesResponse.fromJson(Map json) = - _$ListVectorStoreFilesResponseImpl.fromJson; + _$ListVectorStoresResponseImpl; + const _ListVectorStoresResponse._() : super._(); - @override + factory _ListVectorStoresResponse.fromJson(Map json) = + _$ListVectorStoresResponseImpl.fromJson; /// The object type, which is always `list`. - String get object; @override + String get object; - /// A list of message files. - List get data; + /// A list of assistant files. @override + List get data; - /// The ID of the first message file in the list. - @JsonKey(name: 'first_id') - String get firstId; + /// The ID of the first assistant file in the list. @override + @JsonKey(name: 'first_id') + String? 
get firstId; - /// The ID of the last message file in the list. - @JsonKey(name: 'last_id') - String get lastId; + /// The ID of the last assistant file in the list. @override + @JsonKey(name: 'last_id') + String? get lastId; - /// Whether there are more message files available. + /// Whether there are more assistant files available. + @override @JsonKey(name: 'has_more') bool get hasMore; + + /// Create a copy of ListVectorStoresResponse + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$ListVectorStoreFilesResponseImplCopyWith< - _$ListVectorStoreFilesResponseImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ListVectorStoresResponseImplCopyWith<_$ListVectorStoresResponseImpl> get copyWith => throw _privateConstructorUsedError; } -DeleteVectorStoreFileResponse _$DeleteVectorStoreFileResponseFromJson( +DeleteVectorStoreResponse _$DeleteVectorStoreResponseFromJson( Map json) { - return _DeleteVectorStoreFileResponse.fromJson(json); + return _DeleteVectorStoreResponse.fromJson(json); } /// @nodoc -mixin _$DeleteVectorStoreFileResponse { - /// The ID of the deleted vector store file. +mixin _$DeleteVectorStoreResponse { + /// The ID of the deleted vector store. String get id => throw _privateConstructorUsedError; - /// Whether the vector store file was deleted. + /// Whether the vector store was deleted. bool get deleted => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.file.deleted`. + /// The object type, which is always `vector_store.deleted`. String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteVectorStoreResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $DeleteVectorStoreFileResponseCopyWith - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $DeleteVectorStoreResponseCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $DeleteVectorStoreFileResponseCopyWith<$Res> { - factory $DeleteVectorStoreFileResponseCopyWith( - DeleteVectorStoreFileResponse value, - $Res Function(DeleteVectorStoreFileResponse) then) = - _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, - DeleteVectorStoreFileResponse>; +abstract class $DeleteVectorStoreResponseCopyWith<$Res> { + factory $DeleteVectorStoreResponseCopyWith(DeleteVectorStoreResponse value, + $Res Function(DeleteVectorStoreResponse) then) = + _$DeleteVectorStoreResponseCopyWithImpl<$Res, DeleteVectorStoreResponse>; @useResult $Res call({String id, bool deleted, String object}); } /// @nodoc -class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, - $Val extends DeleteVectorStoreFileResponse> - implements $DeleteVectorStoreFileResponseCopyWith<$Res> { - _$DeleteVectorStoreFileResponseCopyWithImpl(this._value, this._then); +class _$DeleteVectorStoreResponseCopyWithImpl<$Res, + $Val extends DeleteVectorStoreResponse> + implements $DeleteVectorStoreResponseCopyWith<$Res> { + _$DeleteVectorStoreResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -46938,27 +50139,29 @@ class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, } /// @nodoc -abstract class _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> - implements $DeleteVectorStoreFileResponseCopyWith<$Res> { - factory _$$DeleteVectorStoreFileResponseImplCopyWith( - _$DeleteVectorStoreFileResponseImpl value, - $Res Function(_$DeleteVectorStoreFileResponseImpl) then) = - __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res>; +abstract class _$$DeleteVectorStoreResponseImplCopyWith<$Res> + implements $DeleteVectorStoreResponseCopyWith<$Res> { + factory _$$DeleteVectorStoreResponseImplCopyWith( + _$DeleteVectorStoreResponseImpl value, + $Res Function(_$DeleteVectorStoreResponseImpl) then) = + __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res>; @override @useResult $Res call({String id, bool deleted, String object}); } /// @nodoc -class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> - extends _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, - _$DeleteVectorStoreFileResponseImpl> - implements _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> { - __$$DeleteVectorStoreFileResponseImplCopyWithImpl( - _$DeleteVectorStoreFileResponseImpl _value, - $Res Function(_$DeleteVectorStoreFileResponseImpl) _then) +class __$$DeleteVectorStoreResponseImplCopyWithImpl<$Res> + extends _$DeleteVectorStoreResponseCopyWithImpl<$Res, + _$DeleteVectorStoreResponseImpl> + implements _$$DeleteVectorStoreResponseImplCopyWith<$Res> { + __$$DeleteVectorStoreResponseImplCopyWithImpl( + _$DeleteVectorStoreResponseImpl _value, + $Res Function(_$DeleteVectorStoreResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -46966,7 +50169,7 @@ class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> Object? deleted = null, Object? object = null, }) { - return _then(_$DeleteVectorStoreFileResponseImpl( + return _then(_$DeleteVectorStoreResponseImpl( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable @@ -46985,107 +50188,110 @@ class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$DeleteVectorStoreFileResponseImpl - extends _DeleteVectorStoreFileResponse { - const _$DeleteVectorStoreFileResponseImpl( +class _$DeleteVectorStoreResponseImpl extends _DeleteVectorStoreResponse { + const _$DeleteVectorStoreResponseImpl( {required this.id, required this.deleted, required this.object}) : super._(); - factory _$DeleteVectorStoreFileResponseImpl.fromJson( - Map json) => - _$$DeleteVectorStoreFileResponseImplFromJson(json); + factory _$DeleteVectorStoreResponseImpl.fromJson(Map json) => + _$$DeleteVectorStoreResponseImplFromJson(json); - /// The ID of the deleted vector store file. + /// The ID of the deleted vector store. @override final String id; - /// Whether the vector store file was deleted. + /// Whether the vector store was deleted. @override final bool deleted; - /// The object type, which is always `vector_store.file.deleted`. + /// The object type, which is always `vector_store.deleted`. 
@override final String object; @override String toString() { - return 'DeleteVectorStoreFileResponse(id: $id, deleted: $deleted, object: $object)'; + return 'DeleteVectorStoreResponse(id: $id, deleted: $deleted, object: $object)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$DeleteVectorStoreFileResponseImpl && + other is _$DeleteVectorStoreResponseImpl && (identical(other.id, id) || other.id == id) && (identical(other.deleted, deleted) || other.deleted == deleted) && (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$DeleteVectorStoreFileResponseImplCopyWith< - _$DeleteVectorStoreFileResponseImpl> - get copyWith => __$$DeleteVectorStoreFileResponseImplCopyWithImpl< - _$DeleteVectorStoreFileResponseImpl>(this, _$identity); + _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> + get copyWith => __$$DeleteVectorStoreResponseImplCopyWithImpl< + _$DeleteVectorStoreResponseImpl>(this, _$identity); @override Map toJson() { - return _$$DeleteVectorStoreFileResponseImplToJson( + return _$$DeleteVectorStoreResponseImplToJson( this, ); } } -abstract class _DeleteVectorStoreFileResponse - extends DeleteVectorStoreFileResponse { - const factory _DeleteVectorStoreFileResponse( +abstract class _DeleteVectorStoreResponse extends DeleteVectorStoreResponse { + const factory _DeleteVectorStoreResponse( {required final String id, required final bool deleted, - required final String object}) = _$DeleteVectorStoreFileResponseImpl; - const _DeleteVectorStoreFileResponse._() : super._(); + required final String object}) = _$DeleteVectorStoreResponseImpl; + const _DeleteVectorStoreResponse._() : super._(); - factory _DeleteVectorStoreFileResponse.fromJson(Map json) = - _$DeleteVectorStoreFileResponseImpl.fromJson; + factory _DeleteVectorStoreResponse.fromJson(Map json) = + _$DeleteVectorStoreResponseImpl.fromJson; + /// The ID of the deleted vector store. @override - - /// The ID of the deleted vector store file. String get id; - @override - /// Whether the vector store file was deleted. - bool get deleted; + /// Whether the vector store was deleted. @override + bool get deleted; - /// The object type, which is always `vector_store.file.deleted`. + /// The object type, which is always `vector_store.deleted`. + @override String get object; + + /// Create a copy of DeleteVectorStoreResponse + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$DeleteVectorStoreFileResponseImplCopyWith< - _$DeleteVectorStoreFileResponseImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$DeleteVectorStoreResponseImplCopyWith<_$DeleteVectorStoreResponseImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreFileBatchObject _$VectorStoreFileBatchObjectFromJson( +VectorStoreFileObject _$VectorStoreFileObjectFromJson( Map json) { - return _VectorStoreFileBatchObject.fromJson(json); + return _VectorStoreFileObject.fromJson(json); } /// @nodoc -mixin _$VectorStoreFileBatchObject { +mixin _$VectorStoreFileObject { /// The identifier, which can be referenced in API endpoints. String get id => throw _privateConstructorUsedError; - /// The object type, which is always `vector_store.file_batch`. + /// The object type, which is always `vector_store.file`. String get object => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the vector store files batch was created. + /// The total vector store usage in bytes. Note that this may be different from the original file size. + @JsonKey(name: 'usage_bytes') + int get usageBytes => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the vector store file was created. @JsonKey(name: 'created_at') int get createdAt => throw _privateConstructorUsedError; @@ -47093,60 +50299,75 @@ mixin _$VectorStoreFileBatchObject { @JsonKey(name: 'vector_store_id') String get vectorStoreId => throw _privateConstructorUsedError; - /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - VectorStoreFileBatchObjectStatus get status => + /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + VectorStoreFileStatus get status => throw _privateConstructorUsedError; + + /// The last error associated with this vector store file. Will be `null` if there are no errors. + @JsonKey(name: 'last_error') + VectorStoreFileObjectLastError? get lastError => throw _privateConstructorUsedError; - /// The number of files per status. - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts get fileCounts => + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? get chunkingStrategy => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreFileBatchObjectCopyWith - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreFileObjectCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileBatchObjectCopyWith<$Res> { - factory $VectorStoreFileBatchObjectCopyWith(VectorStoreFileBatchObject value, - $Res Function(VectorStoreFileBatchObject) then) = - _$VectorStoreFileBatchObjectCopyWithImpl<$Res, - VectorStoreFileBatchObject>; +abstract class $VectorStoreFileObjectCopyWith<$Res> { + factory $VectorStoreFileObjectCopyWith(VectorStoreFileObject value, + $Res Function(VectorStoreFileObject) then) = + _$VectorStoreFileObjectCopyWithImpl<$Res, VectorStoreFileObject>; @useResult $Res call( {String id, String object, + @JsonKey(name: 'usage_bytes') int usageBytes, @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileBatchObjectStatus status, - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts fileCounts}); + VectorStoreFileStatus status, + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy}); - $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; + $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc -class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, - $Val extends VectorStoreFileBatchObject> - implements $VectorStoreFileBatchObjectCopyWith<$Res> { - _$VectorStoreFileBatchObjectCopyWithImpl(this._value, this._then); +class _$VectorStoreFileObjectCopyWithImpl<$Res, + $Val extends VectorStoreFileObject> + implements $VectorStoreFileObjectCopyWith<$Res> { + _$VectorStoreFileObjectCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = null, Object? object = null, + Object? usageBytes = null, Object? createdAt = null, Object? vectorStoreId = null, Object? status = null, - Object? fileCounts = null, + Object? lastError = freezed, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( id: null == id @@ -47157,6 +50378,10 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable @@ -47168,67 +50393,99 @@ class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, status: null == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectStatus, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectFileCounts, + as VectorStoreFileStatus, + lastError: freezed == lastError + ? _value.lastError + : lastError // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastError?, + chunkingStrategy: freezed == chunkingStrategy + ? 
_value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyResponseParam?, ) as $Val); } + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts { - return $VectorStoreFileBatchObjectFileCountsCopyWith<$Res>( - _value.fileCounts, (value) { - return _then(_value.copyWith(fileCounts: value) as $Val); - }); - } -} - + $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError { + if (_value.lastError == null) { + return null; + } + + return $VectorStoreFileObjectLastErrorCopyWith<$Res>(_value.lastError!, + (value) { + return _then(_value.copyWith(lastError: value) as $Val); + }); + } + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyResponseParamCopyWith<$Res>( + _value.chunkingStrategy!, (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } +} + /// @nodoc -abstract class _$$VectorStoreFileBatchObjectImplCopyWith<$Res> - implements $VectorStoreFileBatchObjectCopyWith<$Res> { - factory _$$VectorStoreFileBatchObjectImplCopyWith( - _$VectorStoreFileBatchObjectImpl value, - $Res Function(_$VectorStoreFileBatchObjectImpl) then) = - __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileObjectImplCopyWith<$Res> + implements $VectorStoreFileObjectCopyWith<$Res> { + factory _$$VectorStoreFileObjectImplCopyWith( + _$VectorStoreFileObjectImpl value, + $Res Function(_$VectorStoreFileObjectImpl) then) = + __$$VectorStoreFileObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( {String id, String object, + @JsonKey(name: 'usage_bytes') int usageBytes, @JsonKey(name: 'created_at') int createdAt, @JsonKey(name: 'vector_store_id') String vectorStoreId, - VectorStoreFileBatchObjectStatus status, - @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts fileCounts}); + VectorStoreFileStatus status, + @JsonKey(name: 'last_error') VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? chunkingStrategy}); @override - $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; + $VectorStoreFileObjectLastErrorCopyWith<$Res>? get lastError; + @override + $ChunkingStrategyResponseParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc -class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> - extends _$VectorStoreFileBatchObjectCopyWithImpl<$Res, - _$VectorStoreFileBatchObjectImpl> - implements _$$VectorStoreFileBatchObjectImplCopyWith<$Res> { - __$$VectorStoreFileBatchObjectImplCopyWithImpl( - _$VectorStoreFileBatchObjectImpl _value, - $Res Function(_$VectorStoreFileBatchObjectImpl) _then) +class __$$VectorStoreFileObjectImplCopyWithImpl<$Res> + extends _$VectorStoreFileObjectCopyWithImpl<$Res, + _$VectorStoreFileObjectImpl> + implements _$$VectorStoreFileObjectImplCopyWith<$Res> { + __$$VectorStoreFileObjectImplCopyWithImpl(_$VectorStoreFileObjectImpl _value, + $Res Function(_$VectorStoreFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ Object? id = null, Object? object = null, + Object? usageBytes = null, Object? createdAt = null, Object? vectorStoreId = null, Object? status = null, - Object? fileCounts = null, + Object? lastError = freezed, + Object? chunkingStrategy = freezed, }) { - return _then(_$VectorStoreFileBatchObjectImpl( + return _then(_$VectorStoreFileObjectImpl( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable @@ -47237,6 +50494,10 @@ class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> ? _value.object : object // ignore: cast_nullable_to_non_nullable as String, + usageBytes: null == usageBytes + ? _value.usageBytes + : usageBytes // ignore: cast_nullable_to_non_nullable + as int, createdAt: null == createdAt ? _value.createdAt : createdAt // ignore: cast_nullable_to_non_nullable @@ -47248,40 +50509,51 @@ class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> status: null == status ? _value.status : status // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectStatus, - fileCounts: null == fileCounts - ? _value.fileCounts - : fileCounts // ignore: cast_nullable_to_non_nullable - as VectorStoreFileBatchObjectFileCounts, + as VectorStoreFileStatus, + lastError: freezed == lastError + ? _value.lastError + : lastError // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastError?, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyResponseParam?, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { - const _$VectorStoreFileBatchObjectImpl( +class _$VectorStoreFileObjectImpl extends _VectorStoreFileObject { + const _$VectorStoreFileObjectImpl( {required this.id, required this.object, + @JsonKey(name: 'usage_bytes') required this.usageBytes, @JsonKey(name: 'created_at') required this.createdAt, @JsonKey(name: 'vector_store_id') required this.vectorStoreId, required this.status, - @JsonKey(name: 'file_counts') required this.fileCounts}) + @JsonKey(name: 'last_error') required this.lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : super._(); - factory _$VectorStoreFileBatchObjectImpl.fromJson( - Map json) => - _$$VectorStoreFileBatchObjectImplFromJson(json); + factory _$VectorStoreFileObjectImpl.fromJson(Map json) => + _$$VectorStoreFileObjectImplFromJson(json); /// The identifier, which can be referenced in API endpoints. @override final String id; - /// The object type, which is always `vector_store.file_batch`. + /// The object type, which is always `vector_store.file`. @override final String object; - /// The Unix timestamp (in seconds) for when the vector store files batch was created. + /// The total vector store usage in bytes. Note that this may be different from the original file size. + @override + @JsonKey(name: 'usage_bytes') + final int usageBytes; + + /// The Unix timestamp (in seconds) for when the vector store file was created. @override @JsonKey(name: 'created_at') final int createdAt; @@ -47291,3065 +50563,7938 @@ class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { @JsonKey(name: 'vector_store_id') final String vectorStoreId; - /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. 
+ /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. @override - final VectorStoreFileBatchObjectStatus status; + final VectorStoreFileStatus status; - /// The number of files per status. + /// The last error associated with this vector store file. Will be `null` if there are no errors. @override - @JsonKey(name: 'file_counts') - final VectorStoreFileBatchObjectFileCounts fileCounts; + @JsonKey(name: 'last_error') + final VectorStoreFileObjectLastError? lastError; + + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyResponseParam? chunkingStrategy; @override String toString() { - return 'VectorStoreFileBatchObject(id: $id, object: $object, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, fileCounts: $fileCounts)'; + return 'VectorStoreFileObject(id: $id, object: $object, usageBytes: $usageBytes, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, lastError: $lastError, chunkingStrategy: $chunkingStrategy)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileBatchObjectImpl && + other is _$VectorStoreFileObjectImpl && (identical(other.id, id) || other.id == id) && (identical(other.object, object) || other.object == object) && + (identical(other.usageBytes, usageBytes) || + other.usageBytes == usageBytes) && (identical(other.createdAt, createdAt) || other.createdAt == createdAt) && (identical(other.vectorStoreId, vectorStoreId) || other.vectorStoreId == vectorStoreId) && (identical(other.status, status) || other.status == status) && - (identical(other.fileCounts, fileCounts) || - other.fileCounts == fileCounts)); + (identical(other.lastError, lastError) || + other.lastError == lastError) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, id, object, createdAt, vectorStoreId, status, fileCounts); + int get hashCode => Object.hash(runtimeType, id, object, usageBytes, + createdAt, vectorStoreId, status, lastError, chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> - get copyWith => __$$VectorStoreFileBatchObjectImplCopyWithImpl< - _$VectorStoreFileBatchObjectImpl>(this, _$identity); + _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> + get copyWith => __$$VectorStoreFileObjectImplCopyWithImpl< + _$VectorStoreFileObjectImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileBatchObjectImplToJson( + return _$$VectorStoreFileObjectImplToJson( this, ); } } -abstract class _VectorStoreFileBatchObject extends VectorStoreFileBatchObject { - const factory _VectorStoreFileBatchObject( +abstract class _VectorStoreFileObject extends VectorStoreFileObject { + const factory _VectorStoreFileObject( {required final String id, required final String object, + @JsonKey(name: 'usage_bytes') required final int usageBytes, @JsonKey(name: 'created_at') required final int createdAt, @JsonKey(name: 'vector_store_id') required final String vectorStoreId, - required final VectorStoreFileBatchObjectStatus status, - @JsonKey(name: 'file_counts') - required final VectorStoreFileBatchObjectFileCounts fileCounts}) = - _$VectorStoreFileBatchObjectImpl; - const _VectorStoreFileBatchObject._() : super._(); - - factory _VectorStoreFileBatchObject.fromJson(Map json) = - _$VectorStoreFileBatchObjectImpl.fromJson; + required final VectorStoreFileStatus status, + @JsonKey(name: 'last_error') + required final VectorStoreFileObjectLastError? lastError, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyResponseParam? chunkingStrategy}) = + _$VectorStoreFileObjectImpl; + const _VectorStoreFileObject._() : super._(); - @override + factory _VectorStoreFileObject.fromJson(Map json) = + _$VectorStoreFileObjectImpl.fromJson; /// The identifier, which can be referenced in API endpoints. - String get id; @override + String get id; - /// The object type, which is always `vector_store.file_batch`. + /// The object type, which is always `vector_store.file`. + @override String get object; + + /// The total vector store usage in bytes. Note that this may be different from the original file size. @override + @JsonKey(name: 'usage_bytes') + int get usageBytes; - /// The Unix timestamp (in seconds) for when the vector store files batch was created. + /// The Unix timestamp (in seconds) for when the vector store file was created. + @override @JsonKey(name: 'created_at') int get createdAt; - @override /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override @JsonKey(name: 'vector_store_id') String get vectorStoreId; + + /// The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. @override + VectorStoreFileStatus get status; - /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. - VectorStoreFileBatchObjectStatus get status; + /// The last error associated with this vector store file. Will be `null` if there are no errors. @override + @JsonKey(name: 'last_error') + VectorStoreFileObjectLastError? get lastError; - /// The number of files per status. 
- @JsonKey(name: 'file_counts') - VectorStoreFileBatchObjectFileCounts get fileCounts; + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] @override - @JsonKey(ignore: true) - _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? get chunkingStrategy; + + /// Create a copy of VectorStoreFileObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreFileObjectImplCopyWith<_$VectorStoreFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } -VectorStoreFileBatchObjectFileCounts - _$VectorStoreFileBatchObjectFileCountsFromJson(Map json) { - return _VectorStoreFileBatchObjectFileCounts.fromJson(json); +VectorStoreFileObjectLastError _$VectorStoreFileObjectLastErrorFromJson( + Map json) { + return _VectorStoreFileObjectLastError.fromJson(json); } /// @nodoc -mixin _$VectorStoreFileBatchObjectFileCounts { - /// The number of files that are currently being processed. - @JsonKey(name: 'in_progress') - int get inProgress => throw _privateConstructorUsedError; - - /// The number of files that have been processed. - int get completed => throw _privateConstructorUsedError; - - /// The number of files that have failed to process. - int get failed => throw _privateConstructorUsedError; - - /// The number of files that where cancelled. - int get cancelled => throw _privateConstructorUsedError; +mixin _$VectorStoreFileObjectLastError { + /// One of `server_error` or `rate_limit_exceeded`. + VectorStoreFileObjectLastErrorCode get code => + throw _privateConstructorUsedError; - /// The total number of files. - int get total => throw _privateConstructorUsedError; + /// A human-readable description of the error. + String get message => throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileObjectLastError to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $VectorStoreFileBatchObjectFileCountsCopyWith< - VectorStoreFileBatchObjectFileCounts> + + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreFileObjectLastErrorCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { - factory $VectorStoreFileBatchObjectFileCountsCopyWith( - VectorStoreFileBatchObjectFileCounts value, - $Res Function(VectorStoreFileBatchObjectFileCounts) then) = - _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, - VectorStoreFileBatchObjectFileCounts>; +abstract class $VectorStoreFileObjectLastErrorCopyWith<$Res> { + factory $VectorStoreFileObjectLastErrorCopyWith( + VectorStoreFileObjectLastError value, + $Res Function(VectorStoreFileObjectLastError) then) = + _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, + VectorStoreFileObjectLastError>; @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); + $Res call({VectorStoreFileObjectLastErrorCode code, String message}); } /// @nodoc -class _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, - $Val extends VectorStoreFileBatchObjectFileCounts> - implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { - _$VectorStoreFileBatchObjectFileCountsCopyWithImpl(this._value, this._then); +class _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, + $Val extends VectorStoreFileObjectLastError> + implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { + _$VectorStoreFileObjectLastErrorCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, + Object? code = null, + Object? message = null, }) { return _then(_value.copyWith( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, + code: null == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastErrorCode, + message: null == message + ? 
_value.message + : message // ignore: cast_nullable_to_non_nullable + as String, ) as $Val); } } /// @nodoc -abstract class _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> - implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { - factory _$$VectorStoreFileBatchObjectFileCountsImplCopyWith( - _$VectorStoreFileBatchObjectFileCountsImpl value, - $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) then) = - __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> + implements $VectorStoreFileObjectLastErrorCopyWith<$Res> { + factory _$$VectorStoreFileObjectLastErrorImplCopyWith( + _$VectorStoreFileObjectLastErrorImpl value, + $Res Function(_$VectorStoreFileObjectLastErrorImpl) then) = + __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(name: 'in_progress') int inProgress, - int completed, - int failed, - int cancelled, - int total}); + $Res call({VectorStoreFileObjectLastErrorCode code, String message}); } /// @nodoc -class __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res> - extends _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, - _$VectorStoreFileBatchObjectFileCountsImpl> - implements _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> { - __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl( - _$VectorStoreFileBatchObjectFileCountsImpl _value, - $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) _then) +class __$$VectorStoreFileObjectLastErrorImplCopyWithImpl<$Res> + extends _$VectorStoreFileObjectLastErrorCopyWithImpl<$Res, + _$VectorStoreFileObjectLastErrorImpl> + implements _$$VectorStoreFileObjectLastErrorImplCopyWith<$Res> { + __$$VectorStoreFileObjectLastErrorImplCopyWithImpl( + _$VectorStoreFileObjectLastErrorImpl _value, + $Res Function(_$VectorStoreFileObjectLastErrorImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inProgress = null, - Object? completed = null, - Object? failed = null, - Object? cancelled = null, - Object? total = null, + Object? code = null, + Object? message = null, }) { - return _then(_$VectorStoreFileBatchObjectFileCountsImpl( - inProgress: null == inProgress - ? _value.inProgress - : inProgress // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - cancelled: null == cancelled - ? _value.cancelled - : cancelled // ignore: cast_nullable_to_non_nullable - as int, - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$VectorStoreFileObjectLastErrorImpl( + code: null == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as VectorStoreFileObjectLastErrorCode, + message: null == message + ? 
_value.message + : message // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$VectorStoreFileBatchObjectFileCountsImpl - extends _VectorStoreFileBatchObjectFileCounts { - const _$VectorStoreFileBatchObjectFileCountsImpl( - {@JsonKey(name: 'in_progress') required this.inProgress, - required this.completed, - required this.failed, - required this.cancelled, - required this.total}) +class _$VectorStoreFileObjectLastErrorImpl + extends _VectorStoreFileObjectLastError { + const _$VectorStoreFileObjectLastErrorImpl( + {required this.code, required this.message}) : super._(); - factory _$VectorStoreFileBatchObjectFileCountsImpl.fromJson( + factory _$VectorStoreFileObjectLastErrorImpl.fromJson( Map json) => - _$$VectorStoreFileBatchObjectFileCountsImplFromJson(json); - - /// The number of files that are currently being processed. - @override - @JsonKey(name: 'in_progress') - final int inProgress; - - /// The number of files that have been processed. - @override - final int completed; - - /// The number of files that have failed to process. - @override - final int failed; + _$$VectorStoreFileObjectLastErrorImplFromJson(json); - /// The number of files that where cancelled. + /// One of `server_error` or `rate_limit_exceeded`. @override - final int cancelled; + final VectorStoreFileObjectLastErrorCode code; - /// The total number of files. + /// A human-readable description of the error. @override - final int total; + final String message; @override String toString() { - return 'VectorStoreFileBatchObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; + return 'VectorStoreFileObjectLastError(code: $code, message: $message)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$VectorStoreFileBatchObjectFileCountsImpl && - (identical(other.inProgress, inProgress) || - other.inProgress == inProgress) && - (identical(other.completed, completed) || - other.completed == completed) && - (identical(other.failed, failed) || other.failed == failed) && - (identical(other.cancelled, cancelled) || - other.cancelled == cancelled) && - (identical(other.total, total) || other.total == total)); + other is _$VectorStoreFileObjectLastErrorImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); + int get hashCode => Object.hash(runtimeType, code, message); - @JsonKey(ignore: true) + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< - _$VectorStoreFileBatchObjectFileCountsImpl> - get copyWith => __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl< - _$VectorStoreFileBatchObjectFileCountsImpl>(this, _$identity); + _$$VectorStoreFileObjectLastErrorImplCopyWith< + _$VectorStoreFileObjectLastErrorImpl> + get copyWith => __$$VectorStoreFileObjectLastErrorImplCopyWithImpl< + _$VectorStoreFileObjectLastErrorImpl>(this, _$identity); @override Map toJson() { - return _$$VectorStoreFileBatchObjectFileCountsImplToJson( + return _$$VectorStoreFileObjectLastErrorImplToJson( this, ); } } -abstract class _VectorStoreFileBatchObjectFileCounts - extends VectorStoreFileBatchObjectFileCounts { - const factory _VectorStoreFileBatchObjectFileCounts( - {@JsonKey(name: 'in_progress') required final int inProgress, - required final int completed, - required final int failed, - required final int cancelled, - required final int total}) = _$VectorStoreFileBatchObjectFileCountsImpl; - const _VectorStoreFileBatchObjectFileCounts._() : super._(); - - factory _VectorStoreFileBatchObjectFileCounts.fromJson( - Map json) = - _$VectorStoreFileBatchObjectFileCountsImpl.fromJson; - - @override - - /// The number of files that are currently being processed. - @JsonKey(name: 'in_progress') - int get inProgress; - @override +abstract class _VectorStoreFileObjectLastError + extends VectorStoreFileObjectLastError { + const factory _VectorStoreFileObjectLastError( + {required final VectorStoreFileObjectLastErrorCode code, + required final String message}) = _$VectorStoreFileObjectLastErrorImpl; + const _VectorStoreFileObjectLastError._() : super._(); - /// The number of files that have been processed. - int get completed; - @override + factory _VectorStoreFileObjectLastError.fromJson(Map json) = + _$VectorStoreFileObjectLastErrorImpl.fromJson; - /// The number of files that have failed to process. - int get failed; + /// One of `server_error` or `rate_limit_exceeded`. @override + VectorStoreFileObjectLastErrorCode get code; - /// The number of files that where cancelled. - int get cancelled; + /// A human-readable description of the error. @override + String get message; - /// The total number of files. - int get total; + /// Create a copy of VectorStoreFileObjectLastError + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< - _$VectorStoreFileBatchObjectFileCountsImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreFileObjectLastErrorImplCopyWith< + _$VectorStoreFileObjectLastErrorImpl> get copyWith => throw _privateConstructorUsedError; } -CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson( +StaticChunkingStrategy _$StaticChunkingStrategyFromJson( Map json) { - return _CreateVectorStoreFileBatchRequest.fromJson(json); + return _StaticChunkingStrategy.fromJson(json); } /// @nodoc -mixin _$CreateVectorStoreFileBatchRequest { - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_ids') - List get fileIds => throw _privateConstructorUsedError; +mixin _$StaticChunkingStrategy { + /// The maximum number of tokens in each chunk. The default value is `800`. 
The minimum value is `100` and the + /// maximum value is `4096`. + @JsonKey(name: 'max_chunk_size_tokens') + int get maxChunkSizeTokens => throw _privateConstructorUsedError; + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @JsonKey(name: 'chunk_overlap_tokens') + int get chunkOverlapTokens => throw _privateConstructorUsedError; + + /// Serializes this StaticChunkingStrategy to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateVectorStoreFileBatchRequestCopyWith - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $StaticChunkingStrategyCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateVectorStoreFileBatchRequestCopyWith<$Res> { - factory $CreateVectorStoreFileBatchRequestCopyWith( - CreateVectorStoreFileBatchRequest value, - $Res Function(CreateVectorStoreFileBatchRequest) then) = - _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, - CreateVectorStoreFileBatchRequest>; +abstract class $StaticChunkingStrategyCopyWith<$Res> { + factory $StaticChunkingStrategyCopyWith(StaticChunkingStrategy value, + $Res Function(StaticChunkingStrategy) then) = + _$StaticChunkingStrategyCopyWithImpl<$Res, StaticChunkingStrategy>; @useResult - $Res call({@JsonKey(name: 'file_ids') List fileIds}); + $Res call( + {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); } /// @nodoc -class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, - $Val extends CreateVectorStoreFileBatchRequest> - implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { - _$CreateVectorStoreFileBatchRequestCopyWithImpl(this._value, this._then); +class _$StaticChunkingStrategyCopyWithImpl<$Res, + $Val extends StaticChunkingStrategy> + implements $StaticChunkingStrategyCopyWith<$Res> { + _$StaticChunkingStrategyCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileIds = null, + Object? maxChunkSizeTokens = null, + Object? chunkOverlapTokens = null, }) { return _then(_value.copyWith( - fileIds: null == fileIds - ? _value.fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List, + maxChunkSizeTokens: null == maxChunkSizeTokens + ? _value.maxChunkSizeTokens + : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable + as int, + chunkOverlapTokens: null == chunkOverlapTokens + ? 
_value.chunkOverlapTokens + : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable + as int, ) as $Val); } } /// @nodoc -abstract class _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> - implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { - factory _$$CreateVectorStoreFileBatchRequestImplCopyWith( - _$CreateVectorStoreFileBatchRequestImpl value, - $Res Function(_$CreateVectorStoreFileBatchRequestImpl) then) = - __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res>; +abstract class _$$StaticChunkingStrategyImplCopyWith<$Res> + implements $StaticChunkingStrategyCopyWith<$Res> { + factory _$$StaticChunkingStrategyImplCopyWith( + _$StaticChunkingStrategyImpl value, + $Res Function(_$StaticChunkingStrategyImpl) then) = + __$$StaticChunkingStrategyImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_ids') List fileIds}); + $Res call( + {@JsonKey(name: 'max_chunk_size_tokens') int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') int chunkOverlapTokens}); } /// @nodoc -class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> - extends _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, - _$CreateVectorStoreFileBatchRequestImpl> - implements _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> { - __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl( - _$CreateVectorStoreFileBatchRequestImpl _value, - $Res Function(_$CreateVectorStoreFileBatchRequestImpl) _then) +class __$$StaticChunkingStrategyImplCopyWithImpl<$Res> + extends _$StaticChunkingStrategyCopyWithImpl<$Res, + _$StaticChunkingStrategyImpl> + implements _$$StaticChunkingStrategyImplCopyWith<$Res> { + __$$StaticChunkingStrategyImplCopyWithImpl( + _$StaticChunkingStrategyImpl _value, + $Res Function(_$StaticChunkingStrategyImpl) _then) : super(_value, _then); + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileIds = null, + Object? maxChunkSizeTokens = null, + Object? chunkOverlapTokens = null, }) { - return _then(_$CreateVectorStoreFileBatchRequestImpl( - fileIds: null == fileIds - ? _value._fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List, + return _then(_$StaticChunkingStrategyImpl( + maxChunkSizeTokens: null == maxChunkSizeTokens + ? _value.maxChunkSizeTokens + : maxChunkSizeTokens // ignore: cast_nullable_to_non_nullable + as int, + chunkOverlapTokens: null == chunkOverlapTokens + ? _value.chunkOverlapTokens + : chunkOverlapTokens // ignore: cast_nullable_to_non_nullable + as int, )); } } /// @nodoc @JsonSerializable() -class _$CreateVectorStoreFileBatchRequestImpl - extends _CreateVectorStoreFileBatchRequest { - const _$CreateVectorStoreFileBatchRequestImpl( - {@JsonKey(name: 'file_ids') required final List fileIds}) - : _fileIds = fileIds, - super._(); +class _$StaticChunkingStrategyImpl extends _StaticChunkingStrategy { + const _$StaticChunkingStrategyImpl( + {@JsonKey(name: 'max_chunk_size_tokens') required this.maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') required this.chunkOverlapTokens}) + : super._(); - factory _$CreateVectorStoreFileBatchRequestImpl.fromJson( - Map json) => - _$$CreateVectorStoreFileBatchRequestImplFromJson(json); + factory _$StaticChunkingStrategyImpl.fromJson(Map json) => + _$$StaticChunkingStrategyImplFromJson(json); - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. 
Useful for tools like `file_search` that can access files. - final List _fileIds; + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. + @override + @JsonKey(name: 'max_chunk_size_tokens') + final int maxChunkSizeTokens; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. @override - @JsonKey(name: 'file_ids') - List get fileIds { - if (_fileIds is EqualUnmodifiableListView) return _fileIds; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_fileIds); - } + @JsonKey(name: 'chunk_overlap_tokens') + final int chunkOverlapTokens; @override String toString() { - return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds)'; + return 'StaticChunkingStrategy(maxChunkSizeTokens: $maxChunkSizeTokens, chunkOverlapTokens: $chunkOverlapTokens)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateVectorStoreFileBatchRequestImpl && - const DeepCollectionEquality().equals(other._fileIds, _fileIds)); + other is _$StaticChunkingStrategyImpl && + (identical(other.maxChunkSizeTokens, maxChunkSizeTokens) || + other.maxChunkSizeTokens == maxChunkSizeTokens) && + (identical(other.chunkOverlapTokens, chunkOverlapTokens) || + other.chunkOverlapTokens == chunkOverlapTokens)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_fileIds)); + Object.hash(runtimeType, maxChunkSizeTokens, chunkOverlapTokens); - @JsonKey(ignore: true) + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateVectorStoreFileBatchRequestImplCopyWith< - _$CreateVectorStoreFileBatchRequestImpl> - get copyWith => __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl< - _$CreateVectorStoreFileBatchRequestImpl>(this, _$identity); + _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> + get copyWith => __$$StaticChunkingStrategyImplCopyWithImpl< + _$StaticChunkingStrategyImpl>(this, _$identity); @override Map toJson() { - return _$$CreateVectorStoreFileBatchRequestImplToJson( + return _$$StaticChunkingStrategyImplToJson( this, ); } } -abstract class _CreateVectorStoreFileBatchRequest - extends CreateVectorStoreFileBatchRequest { - const factory _CreateVectorStoreFileBatchRequest( - {@JsonKey(name: 'file_ids') required final List fileIds}) = - _$CreateVectorStoreFileBatchRequestImpl; - const _CreateVectorStoreFileBatchRequest._() : super._(); +abstract class _StaticChunkingStrategy extends StaticChunkingStrategy { + const factory _StaticChunkingStrategy( + {@JsonKey(name: 'max_chunk_size_tokens') + required final int maxChunkSizeTokens, + @JsonKey(name: 'chunk_overlap_tokens') + required final int chunkOverlapTokens}) = _$StaticChunkingStrategyImpl; + const _StaticChunkingStrategy._() : super._(); - factory _CreateVectorStoreFileBatchRequest.fromJson( - Map json) = - _$CreateVectorStoreFileBatchRequestImpl.fromJson; + factory _StaticChunkingStrategy.fromJson(Map json) = + _$StaticChunkingStrategyImpl.fromJson; + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. @override + @JsonKey(name: 'max_chunk_size_tokens') + int get maxChunkSizeTokens; - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - @JsonKey(name: 'file_ids') - List get fileIds; + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. @override - @JsonKey(ignore: true) - _$$CreateVectorStoreFileBatchRequestImplCopyWith< - _$CreateVectorStoreFileBatchRequestImpl> + @JsonKey(name: 'chunk_overlap_tokens') + int get chunkOverlapTokens; + + /// Create a copy of StaticChunkingStrategy + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$StaticChunkingStrategyImplCopyWith<_$StaticChunkingStrategyImpl> get copyWith => throw _privateConstructorUsedError; } -Error _$ErrorFromJson(Map json) { - return _Error.fromJson(json); +CreateVectorStoreFileRequest _$CreateVectorStoreFileRequestFromJson( + Map json) { + return _CreateVectorStoreFileRequest.fromJson(json); } /// @nodoc -mixin _$Error { - /// The error code. - String? get code => throw _privateConstructorUsedError; - - /// A human-readable description of the error. - String get message => throw _privateConstructorUsedError; - - /// The parameter in the request that caused the error. - String? get param => throw _privateConstructorUsedError; +mixin _$CreateVectorStoreFileRequest { + /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; - /// The type of error. 
- String get type => throw _privateConstructorUsedError; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + /// Serializes this CreateVectorStoreFileRequest to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ErrorCopyWith get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CreateVectorStoreFileRequestCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ErrorCopyWith<$Res> { - factory $ErrorCopyWith(Error value, $Res Function(Error) then) = - _$ErrorCopyWithImpl<$Res, Error>; +abstract class $CreateVectorStoreFileRequestCopyWith<$Res> { + factory $CreateVectorStoreFileRequestCopyWith( + CreateVectorStoreFileRequest value, + $Res Function(CreateVectorStoreFileRequest) then) = + _$CreateVectorStoreFileRequestCopyWithImpl<$Res, + CreateVectorStoreFileRequest>; @useResult - $Res call({String? code, String message, String? param, String type}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc -class _$ErrorCopyWithImpl<$Res, $Val extends Error> - implements $ErrorCopyWith<$Res> { - _$ErrorCopyWithImpl(this._value, this._then); +class _$CreateVectorStoreFileRequestCopyWithImpl<$Res, + $Val extends CreateVectorStoreFileRequest> + implements $CreateVectorStoreFileRequestCopyWith<$Res> { + _$CreateVectorStoreFileRequestCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = freezed, - Object? message = null, - Object? param = freezed, - Object? type = null, + Object? fileId = null, + Object? chunkingStrategy = freezed, }) { return _then(_value.copyWith( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable as String, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, ) as $Val); } + + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? 
get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } } /// @nodoc -abstract class _$$ErrorImplCopyWith<$Res> implements $ErrorCopyWith<$Res> { - factory _$$ErrorImplCopyWith( - _$ErrorImpl value, $Res Function(_$ErrorImpl) then) = - __$$ErrorImplCopyWithImpl<$Res>; +abstract class _$$CreateVectorStoreFileRequestImplCopyWith<$Res> + implements $CreateVectorStoreFileRequestCopyWith<$Res> { + factory _$$CreateVectorStoreFileRequestImplCopyWith( + _$CreateVectorStoreFileRequestImpl value, + $Res Function(_$CreateVectorStoreFileRequestImpl) then) = + __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res>; @override @useResult - $Res call({String? code, String message, String? param, String type}); + $Res call( + {@JsonKey(name: 'file_id') String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; } /// @nodoc -class __$$ErrorImplCopyWithImpl<$Res> - extends _$ErrorCopyWithImpl<$Res, _$ErrorImpl> - implements _$$ErrorImplCopyWith<$Res> { - __$$ErrorImplCopyWithImpl( - _$ErrorImpl _value, $Res Function(_$ErrorImpl) _then) +class __$$CreateVectorStoreFileRequestImplCopyWithImpl<$Res> + extends _$CreateVectorStoreFileRequestCopyWithImpl<$Res, + _$CreateVectorStoreFileRequestImpl> + implements _$$CreateVectorStoreFileRequestImplCopyWith<$Res> { + __$$CreateVectorStoreFileRequestImplCopyWithImpl( + _$CreateVectorStoreFileRequestImpl _value, + $Res Function(_$CreateVectorStoreFileRequestImpl) _then) : super(_value, _then); + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = freezed, - Object? message = null, - Object? param = freezed, - Object? type = null, + Object? fileId = null, + Object? chunkingStrategy = freezed, }) { - return _then(_$ErrorImpl( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: null == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable + return _then(_$CreateVectorStoreFileRequestImpl( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable as String, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, )); } } /// @nodoc @JsonSerializable() -class _$ErrorImpl extends _Error { - const _$ErrorImpl( - {required this.code, - required this.message, - required this.param, - required this.type}) +class _$CreateVectorStoreFileRequestImpl extends _CreateVectorStoreFileRequest { + const _$CreateVectorStoreFileRequestImpl( + {@JsonKey(name: 'file_id') required this.fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) : super._(); - factory _$ErrorImpl.fromJson(Map json) => - _$$ErrorImplFromJson(json); - - /// The error code. - @override - final String? code; - - /// A human-readable description of the error. 
- @override - final String message; + factory _$CreateVectorStoreFileRequestImpl.fromJson( + Map json) => + _$$CreateVectorStoreFileRequestImplFromJson(json); - /// The parameter in the request that caused the error. + /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @override - final String? param; + @JsonKey(name: 'file_id') + final String fileId; - /// The type of error. + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] @override - final String type; + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy; @override String toString() { - return 'Error(code: $code, message: $message, param: $param, type: $type)'; + return 'CreateVectorStoreFileRequest(fileId: $fileId, chunkingStrategy: $chunkingStrategy)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ErrorImpl && - (identical(other.code, code) || other.code == code) && - (identical(other.message, message) || other.message == message) && - (identical(other.param, param) || other.param == param) && - (identical(other.type, type) || other.type == type)); + other is _$CreateVectorStoreFileRequestImpl && + (identical(other.fileId, fileId) || other.fileId == fileId) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, code, message, param, type); + int get hashCode => Object.hash(runtimeType, fileId, chunkingStrategy); - @JsonKey(ignore: true) + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => - __$$ErrorImplCopyWithImpl<_$ErrorImpl>(this, _$identity); + _$$CreateVectorStoreFileRequestImplCopyWith< + _$CreateVectorStoreFileRequestImpl> + get copyWith => __$$CreateVectorStoreFileRequestImplCopyWithImpl< + _$CreateVectorStoreFileRequestImpl>(this, _$identity); @override Map toJson() { - return _$$ErrorImplToJson( + return _$$CreateVectorStoreFileRequestImplToJson( this, ); } } -abstract class _Error extends Error { - const factory _Error( - {required final String? code, - required final String message, - required final String? param, - required final String type}) = _$ErrorImpl; - const _Error._() : super._(); - - factory _Error.fromJson(Map json) = _$ErrorImpl.fromJson; - - @override +abstract class _CreateVectorStoreFileRequest + extends CreateVectorStoreFileRequest { + const factory _CreateVectorStoreFileRequest( + {@JsonKey(name: 'file_id') required final String fileId, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy}) = + _$CreateVectorStoreFileRequestImpl; + const _CreateVectorStoreFileRequest._() : super._(); - /// The error code. - String? get code; - @override + factory _CreateVectorStoreFileRequest.fromJson(Map json) = + _$CreateVectorStoreFileRequestImpl.fromJson; - /// A human-readable description of the error. 
- String get message; + /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. @override + @JsonKey(name: 'file_id') + String get fileId; - /// The parameter in the request that caused the error. - String? get param; + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; - /// The type of error. - String get type; + /// Create a copy of CreateVectorStoreFileRequest + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => - throw _privateConstructorUsedError; + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateVectorStoreFileRequestImplCopyWith< + _$CreateVectorStoreFileRequestImpl> + get copyWith => throw _privateConstructorUsedError; } -CreateBatchRequest _$CreateBatchRequestFromJson(Map json) { - return _CreateBatchRequest.fromJson(json); +ListVectorStoreFilesResponse _$ListVectorStoreFilesResponseFromJson( + Map json) { + return _ListVectorStoreFilesResponse.fromJson(json); } /// @nodoc -mixin _$CreateBatchRequest { - /// The ID of an uploaded file that contains requests for the new batch. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. - @JsonKey(name: 'input_file_id') - String get inputFileId => throw _privateConstructorUsedError; +mixin _$ListVectorStoreFilesResponse { + /// The object type, which is always `list`. + String get object => throw _privateConstructorUsedError; - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint => throw _privateConstructorUsedError; + /// A list of message files. + List get data => throw _privateConstructorUsedError; - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow => - throw _privateConstructorUsedError; + /// The ID of the first message file in the list. + @JsonKey(name: 'first_id') + String get firstId => throw _privateConstructorUsedError; - /// Optional custom metadata for the batch. - @JsonKey(includeIfNull: false) - Map? get metadata => throw _privateConstructorUsedError; + /// The ID of the last message file in the list. + @JsonKey(name: 'last_id') + String get lastId => throw _privateConstructorUsedError; + + /// Whether there are more message files available. + @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; + /// Serializes this ListVectorStoreFilesResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $CreateBatchRequestCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $ListVectorStoreFilesResponseCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $CreateBatchRequestCopyWith<$Res> { - factory $CreateBatchRequestCopyWith( - CreateBatchRequest value, $Res Function(CreateBatchRequest) then) = - _$CreateBatchRequestCopyWithImpl<$Res, CreateBatchRequest>; +abstract class $ListVectorStoreFilesResponseCopyWith<$Res> { + factory $ListVectorStoreFilesResponseCopyWith( + ListVectorStoreFilesResponse value, + $Res Function(ListVectorStoreFilesResponse) then) = + _$ListVectorStoreFilesResponseCopyWithImpl<$Res, + ListVectorStoreFilesResponse>; @useResult $Res call( - {@JsonKey(name: 'input_file_id') String inputFileId, - BatchEndpoint endpoint, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - @JsonKey(includeIfNull: false) Map? metadata}); + {String object, + List data, + @JsonKey(name: 'first_id') String firstId, + @JsonKey(name: 'last_id') String lastId, + @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class _$CreateBatchRequestCopyWithImpl<$Res, $Val extends CreateBatchRequest> - implements $CreateBatchRequestCopyWith<$Res> { - _$CreateBatchRequestCopyWithImpl(this._value, this._then); +class _$ListVectorStoreFilesResponseCopyWithImpl<$Res, + $Val extends ListVectorStoreFilesResponse> + implements $ListVectorStoreFilesResponseCopyWith<$Res> { + _$ListVectorStoreFilesResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inputFileId = null, - Object? endpoint = null, - Object? completionWindow = null, - Object? metadata = freezed, + Object? object = null, + Object? data = null, + Object? firstId = null, + Object? lastId = null, + Object? hasMore = null, }) { return _then(_value.copyWith( - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable as String, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as Map?, + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: null == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String, + lastId: null == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String, + hasMore: null == hasMore + ? 
_value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, ) as $Val); } } /// @nodoc -abstract class _$$CreateBatchRequestImplCopyWith<$Res> - implements $CreateBatchRequestCopyWith<$Res> { - factory _$$CreateBatchRequestImplCopyWith(_$CreateBatchRequestImpl value, - $Res Function(_$CreateBatchRequestImpl) then) = - __$$CreateBatchRequestImplCopyWithImpl<$Res>; +abstract class _$$ListVectorStoreFilesResponseImplCopyWith<$Res> + implements $ListVectorStoreFilesResponseCopyWith<$Res> { + factory _$$ListVectorStoreFilesResponseImplCopyWith( + _$ListVectorStoreFilesResponseImpl value, + $Res Function(_$ListVectorStoreFilesResponseImpl) then) = + __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(name: 'input_file_id') String inputFileId, - BatchEndpoint endpoint, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - @JsonKey(includeIfNull: false) Map? metadata}); + {String object, + List data, + @JsonKey(name: 'first_id') String firstId, + @JsonKey(name: 'last_id') String lastId, + @JsonKey(name: 'has_more') bool hasMore}); } /// @nodoc -class __$$CreateBatchRequestImplCopyWithImpl<$Res> - extends _$CreateBatchRequestCopyWithImpl<$Res, _$CreateBatchRequestImpl> - implements _$$CreateBatchRequestImplCopyWith<$Res> { - __$$CreateBatchRequestImplCopyWithImpl(_$CreateBatchRequestImpl _value, - $Res Function(_$CreateBatchRequestImpl) _then) +class __$$ListVectorStoreFilesResponseImplCopyWithImpl<$Res> + extends _$ListVectorStoreFilesResponseCopyWithImpl<$Res, + _$ListVectorStoreFilesResponseImpl> + implements _$$ListVectorStoreFilesResponseImplCopyWith<$Res> { + __$$ListVectorStoreFilesResponseImplCopyWithImpl( + _$ListVectorStoreFilesResponseImpl _value, + $Res Function(_$ListVectorStoreFilesResponseImpl) _then) : super(_value, _then); + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? inputFileId = null, - Object? endpoint = null, - Object? completionWindow = null, - Object? metadata = freezed, + Object? object = null, + Object? data = null, + Object? firstId = null, + Object? lastId = null, + Object? hasMore = null, }) { - return _then(_$CreateBatchRequestImpl( - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable + return _then(_$ListVectorStoreFilesResponseImpl( + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable as String, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - metadata: freezed == metadata - ? _value._metadata - : metadata // ignore: cast_nullable_to_non_nullable - as Map?, + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: null == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String, + lastId: null == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String, + hasMore: null == hasMore + ? 
_value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, )); } } /// @nodoc @JsonSerializable() -class _$CreateBatchRequestImpl extends _CreateBatchRequest { - const _$CreateBatchRequestImpl( - {@JsonKey(name: 'input_file_id') required this.inputFileId, - required this.endpoint, - @JsonKey(name: 'completion_window') required this.completionWindow, - @JsonKey(includeIfNull: false) final Map? metadata}) - : _metadata = metadata, +class _$ListVectorStoreFilesResponseImpl extends _ListVectorStoreFilesResponse { + const _$ListVectorStoreFilesResponseImpl( + {required this.object, + required final List data, + @JsonKey(name: 'first_id') required this.firstId, + @JsonKey(name: 'last_id') required this.lastId, + @JsonKey(name: 'has_more') required this.hasMore}) + : _data = data, super._(); - factory _$CreateBatchRequestImpl.fromJson(Map json) => - _$$CreateBatchRequestImplFromJson(json); + factory _$ListVectorStoreFilesResponseImpl.fromJson( + Map json) => + _$$ListVectorStoreFilesResponseImplFromJson(json); - /// The ID of an uploaded file that contains requests for the new batch. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + /// The object type, which is always `list`. @override - @JsonKey(name: 'input_file_id') - final String inputFileId; + final String object; - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + /// A list of message files. + final List _data; + + /// A list of message files. @override - final BatchEndpoint endpoint; + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } - /// The time frame within which the batch should be processed. Currently only `24h` is supported. + /// The ID of the first message file in the list. @override - @JsonKey(name: 'completion_window') - final BatchCompletionWindow completionWindow; + @JsonKey(name: 'first_id') + final String firstId; - /// Optional custom metadata for the batch. - final Map? _metadata; + /// The ID of the last message file in the list. + @override + @JsonKey(name: 'last_id') + final String lastId; - /// Optional custom metadata for the batch. + /// Whether there are more message files available. @override - @JsonKey(includeIfNull: false) - Map? 
get metadata { - final value = _metadata; - if (value == null) return null; - if (_metadata is EqualUnmodifiableMapView) return _metadata; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(value); - } + @JsonKey(name: 'has_more') + final bool hasMore; @override String toString() { - return 'CreateBatchRequest(inputFileId: $inputFileId, endpoint: $endpoint, completionWindow: $completionWindow, metadata: $metadata)'; + return 'ListVectorStoreFilesResponse(object: $object, data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$CreateBatchRequestImpl && - (identical(other.inputFileId, inputFileId) || - other.inputFileId == inputFileId) && - (identical(other.endpoint, endpoint) || - other.endpoint == endpoint) && - (identical(other.completionWindow, completionWindow) || - other.completionWindow == completionWindow) && - const DeepCollectionEquality().equals(other._metadata, _metadata)); + other is _$ListVectorStoreFilesResponseImpl && + (identical(other.object, object) || other.object == object) && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.firstId, firstId) || other.firstId == firstId) && + (identical(other.lastId, lastId) || other.lastId == lastId) && + (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, inputFileId, endpoint, - completionWindow, const DeepCollectionEquality().hash(_metadata)); + int get hashCode => Object.hash(runtimeType, object, + const DeepCollectionEquality().hash(_data), firstId, lastId, hasMore); - @JsonKey(ignore: true) + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => - __$$CreateBatchRequestImplCopyWithImpl<_$CreateBatchRequestImpl>( - this, _$identity); + _$$ListVectorStoreFilesResponseImplCopyWith< + _$ListVectorStoreFilesResponseImpl> + get copyWith => __$$ListVectorStoreFilesResponseImplCopyWithImpl< + _$ListVectorStoreFilesResponseImpl>(this, _$identity); @override Map toJson() { - return _$$CreateBatchRequestImplToJson( + return _$$ListVectorStoreFilesResponseImplToJson( this, ); } } -abstract class _CreateBatchRequest extends CreateBatchRequest { - const factory _CreateBatchRequest( - {@JsonKey(name: 'input_file_id') required final String inputFileId, - required final BatchEndpoint endpoint, - @JsonKey(name: 'completion_window') - required final BatchCompletionWindow completionWindow, - @JsonKey(includeIfNull: false) final Map? 
metadata}) = - _$CreateBatchRequestImpl; - const _CreateBatchRequest._() : super._(); +abstract class _ListVectorStoreFilesResponse + extends ListVectorStoreFilesResponse { + const factory _ListVectorStoreFilesResponse( + {required final String object, + required final List data, + @JsonKey(name: 'first_id') required final String firstId, + @JsonKey(name: 'last_id') required final String lastId, + @JsonKey(name: 'has_more') required final bool hasMore}) = + _$ListVectorStoreFilesResponseImpl; + const _ListVectorStoreFilesResponse._() : super._(); - factory _CreateBatchRequest.fromJson(Map json) = - _$CreateBatchRequestImpl.fromJson; + factory _ListVectorStoreFilesResponse.fromJson(Map json) = + _$ListVectorStoreFilesResponseImpl.fromJson; + /// The object type, which is always `list`. @override + String get object; - /// The ID of an uploaded file that contains requests for the new batch. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - /// - /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. - @JsonKey(name: 'input_file_id') - String get inputFileId; + /// A list of message files. @override + List get data; - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint; + /// The ID of the first message file in the list. @override + @JsonKey(name: 'first_id') + String get firstId; - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow; + /// The ID of the last message file in the list. @override + @JsonKey(name: 'last_id') + String get lastId; - /// Optional custom metadata for the batch. - @JsonKey(includeIfNull: false) - Map? get metadata; + /// Whether there are more message files available. @override - @JsonKey(ignore: true) - _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => - throw _privateConstructorUsedError; + @JsonKey(name: 'has_more') + bool get hasMore; + + /// Create a copy of ListVectorStoreFilesResponse + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ListVectorStoreFilesResponseImplCopyWith< + _$ListVectorStoreFilesResponseImpl> + get copyWith => throw _privateConstructorUsedError; } -Batch _$BatchFromJson(Map json) { - return _Batch.fromJson(json); +DeleteVectorStoreFileResponse _$DeleteVectorStoreFileResponseFromJson( + Map json) { + return _DeleteVectorStoreFileResponse.fromJson(json); } /// @nodoc -mixin _$Batch { - /// No Description +mixin _$DeleteVectorStoreFileResponse { + /// The ID of the deleted vector store file. String get id => throw _privateConstructorUsedError; - /// The object type, which is always `batch`. - BatchObject get object => throw _privateConstructorUsedError; - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
- BatchEndpoint get endpoint => throw _privateConstructorUsedError; - - /// No Description - @JsonKey(includeIfNull: false) - BatchErrors? get errors => throw _privateConstructorUsedError; - - /// The ID of the input file for the batch. - @JsonKey(name: 'input_file_id') - String get inputFileId => throw _privateConstructorUsedError; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow => - throw _privateConstructorUsedError; - - /// The current status of the batch. - BatchStatus get status => throw _privateConstructorUsedError; - - /// The ID of the file containing the outputs of successfully executed requests. - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? get outputFileId => throw _privateConstructorUsedError; - - /// The ID of the file containing the outputs of requests with errors. - @JsonKey(name: 'error_file_id', includeIfNull: false) - String? get errorFileId => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch was created. - @JsonKey(name: 'created_at') - int get createdAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch started processing. - @JsonKey(name: 'in_progress_at', includeIfNull: false) - int? get inProgressAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch will expire. - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch started finalizing. - @JsonKey(name: 'finalizing_at', includeIfNull: false) - int? get finalizingAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch was completed. - @JsonKey(name: 'completed_at', includeIfNull: false) - int? get completedAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch failed. - @JsonKey(name: 'failed_at', includeIfNull: false) - int? get failedAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch expired. - @JsonKey(name: 'expired_at', includeIfNull: false) - int? get expiredAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch started cancelling. - @JsonKey(name: 'cancelling_at', includeIfNull: false) - int? get cancellingAt => throw _privateConstructorUsedError; - - /// The Unix timestamp (in seconds) for when the batch was cancelled. - @JsonKey(name: 'cancelled_at', includeIfNull: false) - int? get cancelledAt => throw _privateConstructorUsedError; - - /// The request counts for different statuses within the batch. - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? get requestCounts => throw _privateConstructorUsedError; + /// Whether the vector store file was deleted. + bool get deleted => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata => throw _privateConstructorUsedError; + /// The object type, which is always `vector_store.file.deleted`. 
+ String get object => throw _privateConstructorUsedError; + /// Serializes this DeleteVectorStoreFileResponse to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BatchCopyWith get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $DeleteVectorStoreFileResponseCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $BatchCopyWith<$Res> { - factory $BatchCopyWith(Batch value, $Res Function(Batch) then) = - _$BatchCopyWithImpl<$Res, Batch>; +abstract class $DeleteVectorStoreFileResponseCopyWith<$Res> { + factory $DeleteVectorStoreFileResponseCopyWith( + DeleteVectorStoreFileResponse value, + $Res Function(DeleteVectorStoreFileResponse) then) = + _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, + DeleteVectorStoreFileResponse>; @useResult - $Res call( - {String id, - BatchObject object, - BatchEndpoint endpoint, - @JsonKey(includeIfNull: false) BatchErrors? errors, - @JsonKey(name: 'input_file_id') String inputFileId, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - BatchStatus status, - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, - @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? requestCounts, - @JsonKey(includeIfNull: false) dynamic metadata}); - - $BatchErrorsCopyWith<$Res>? get errors; - $BatchRequestCountsCopyWith<$Res>? get requestCounts; + $Res call({String id, bool deleted, String object}); } /// @nodoc -class _$BatchCopyWithImpl<$Res, $Val extends Batch> - implements $BatchCopyWith<$Res> { - _$BatchCopyWithImpl(this._value, this._then); +class _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, + $Val extends DeleteVectorStoreFileResponse> + implements $DeleteVectorStoreFileResponseCopyWith<$Res> { + _$DeleteVectorStoreFileResponseCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = null, + Object? deleted = null, Object? object = null, - Object? endpoint = null, - Object? errors = freezed, - Object? inputFileId = null, - Object? completionWindow = null, - Object? status = null, - Object? outputFileId = freezed, - Object? errorFileId = freezed, - Object? createdAt = null, - Object? inProgressAt = freezed, - Object? expiresAt = freezed, - Object? finalizingAt = freezed, - Object? completedAt = freezed, - Object? failedAt = freezed, - Object? expiredAt = freezed, - Object? 
cancellingAt = freezed, - Object? cancelledAt = freezed, - Object? requestCounts = freezed, - Object? metadata = freezed, }) { return _then(_value.copyWith( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable as String, + deleted: null == deleted + ? _value.deleted + : deleted // ignore: cast_nullable_to_non_nullable + as bool, object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as BatchObject, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - errors: freezed == errors - ? _value.errors - : errors // ignore: cast_nullable_to_non_nullable - as BatchErrors?, - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable as String, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as BatchStatus, - outputFileId: freezed == outputFileId - ? _value.outputFileId - : outputFileId // ignore: cast_nullable_to_non_nullable - as String?, - errorFileId: freezed == errorFileId - ? _value.errorFileId - : errorFileId // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - inProgressAt: freezed == inProgressAt - ? _value.inProgressAt - : inProgressAt // ignore: cast_nullable_to_non_nullable - as int?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - finalizingAt: freezed == finalizingAt - ? _value.finalizingAt - : finalizingAt // ignore: cast_nullable_to_non_nullable - as int?, - completedAt: freezed == completedAt - ? _value.completedAt - : completedAt // ignore: cast_nullable_to_non_nullable - as int?, - failedAt: freezed == failedAt - ? _value.failedAt - : failedAt // ignore: cast_nullable_to_non_nullable - as int?, - expiredAt: freezed == expiredAt - ? _value.expiredAt - : expiredAt // ignore: cast_nullable_to_non_nullable - as int?, - cancellingAt: freezed == cancellingAt - ? _value.cancellingAt - : cancellingAt // ignore: cast_nullable_to_non_nullable - as int?, - cancelledAt: freezed == cancelledAt - ? _value.cancelledAt - : cancelledAt // ignore: cast_nullable_to_non_nullable - as int?, - requestCounts: freezed == requestCounts - ? _value.requestCounts - : requestCounts // ignore: cast_nullable_to_non_nullable - as BatchRequestCounts?, - metadata: freezed == metadata - ? _value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, ) as $Val); } - - @override - @pragma('vm:prefer-inline') - $BatchErrorsCopyWith<$Res>? get errors { - if (_value.errors == null) { - return null; - } - - return $BatchErrorsCopyWith<$Res>(_value.errors!, (value) { - return _then(_value.copyWith(errors: value) as $Val); - }); - } - - @override - @pragma('vm:prefer-inline') - $BatchRequestCountsCopyWith<$Res>? 
get requestCounts { - if (_value.requestCounts == null) { - return null; - } - - return $BatchRequestCountsCopyWith<$Res>(_value.requestCounts!, (value) { - return _then(_value.copyWith(requestCounts: value) as $Val); - }); - } } /// @nodoc -abstract class _$$BatchImplCopyWith<$Res> implements $BatchCopyWith<$Res> { - factory _$$BatchImplCopyWith( - _$BatchImpl value, $Res Function(_$BatchImpl) then) = - __$$BatchImplCopyWithImpl<$Res>; +abstract class _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> + implements $DeleteVectorStoreFileResponseCopyWith<$Res> { + factory _$$DeleteVectorStoreFileResponseImplCopyWith( + _$DeleteVectorStoreFileResponseImpl value, + $Res Function(_$DeleteVectorStoreFileResponseImpl) then) = + __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {String id, - BatchObject object, - BatchEndpoint endpoint, - @JsonKey(includeIfNull: false) BatchErrors? errors, - @JsonKey(name: 'input_file_id') String inputFileId, - @JsonKey(name: 'completion_window') - BatchCompletionWindow completionWindow, - BatchStatus status, - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, - @JsonKey(name: 'created_at') int createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? requestCounts, - @JsonKey(includeIfNull: false) dynamic metadata}); - - @override - $BatchErrorsCopyWith<$Res>? get errors; - @override - $BatchRequestCountsCopyWith<$Res>? get requestCounts; + $Res call({String id, bool deleted, String object}); } /// @nodoc -class __$$BatchImplCopyWithImpl<$Res> - extends _$BatchCopyWithImpl<$Res, _$BatchImpl> - implements _$$BatchImplCopyWith<$Res> { - __$$BatchImplCopyWithImpl( - _$BatchImpl _value, $Res Function(_$BatchImpl) _then) +class __$$DeleteVectorStoreFileResponseImplCopyWithImpl<$Res> + extends _$DeleteVectorStoreFileResponseCopyWithImpl<$Res, + _$DeleteVectorStoreFileResponseImpl> + implements _$$DeleteVectorStoreFileResponseImplCopyWith<$Res> { + __$$DeleteVectorStoreFileResponseImplCopyWithImpl( + _$DeleteVectorStoreFileResponseImpl _value, + $Res Function(_$DeleteVectorStoreFileResponseImpl) _then) : super(_value, _then); + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? id = null, + Object? deleted = null, Object? object = null, - Object? endpoint = null, - Object? errors = freezed, - Object? inputFileId = null, - Object? completionWindow = null, - Object? status = null, - Object? outputFileId = freezed, - Object? errorFileId = freezed, - Object? createdAt = null, - Object? inProgressAt = freezed, - Object? expiresAt = freezed, - Object? finalizingAt = freezed, - Object? completedAt = freezed, - Object? failedAt = freezed, - Object? expiredAt = freezed, - Object? 
cancellingAt = freezed, - Object? cancelledAt = freezed, - Object? requestCounts = freezed, - Object? metadata = freezed, }) { - return _then(_$BatchImpl( + return _then(_$DeleteVectorStoreFileResponseImpl( id: null == id ? _value.id : id // ignore: cast_nullable_to_non_nullable as String, + deleted: null == deleted + ? _value.deleted + : deleted // ignore: cast_nullable_to_non_nullable + as bool, object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as BatchObject, - endpoint: null == endpoint - ? _value.endpoint - : endpoint // ignore: cast_nullable_to_non_nullable - as BatchEndpoint, - errors: freezed == errors - ? _value.errors - : errors // ignore: cast_nullable_to_non_nullable - as BatchErrors?, - inputFileId: null == inputFileId - ? _value.inputFileId - : inputFileId // ignore: cast_nullable_to_non_nullable as String, - completionWindow: null == completionWindow - ? _value.completionWindow - : completionWindow // ignore: cast_nullable_to_non_nullable - as BatchCompletionWindow, - status: null == status - ? _value.status - : status // ignore: cast_nullable_to_non_nullable - as BatchStatus, - outputFileId: freezed == outputFileId - ? _value.outputFileId - : outputFileId // ignore: cast_nullable_to_non_nullable - as String?, - errorFileId: freezed == errorFileId - ? _value.errorFileId - : errorFileId // ignore: cast_nullable_to_non_nullable - as String?, - createdAt: null == createdAt - ? _value.createdAt - : createdAt // ignore: cast_nullable_to_non_nullable - as int, - inProgressAt: freezed == inProgressAt - ? _value.inProgressAt - : inProgressAt // ignore: cast_nullable_to_non_nullable - as int?, - expiresAt: freezed == expiresAt - ? _value.expiresAt - : expiresAt // ignore: cast_nullable_to_non_nullable - as int?, - finalizingAt: freezed == finalizingAt - ? _value.finalizingAt - : finalizingAt // ignore: cast_nullable_to_non_nullable - as int?, - completedAt: freezed == completedAt - ? _value.completedAt - : completedAt // ignore: cast_nullable_to_non_nullable - as int?, - failedAt: freezed == failedAt - ? _value.failedAt - : failedAt // ignore: cast_nullable_to_non_nullable - as int?, - expiredAt: freezed == expiredAt - ? _value.expiredAt - : expiredAt // ignore: cast_nullable_to_non_nullable - as int?, - cancellingAt: freezed == cancellingAt - ? _value.cancellingAt - : cancellingAt // ignore: cast_nullable_to_non_nullable - as int?, - cancelledAt: freezed == cancelledAt - ? _value.cancelledAt - : cancelledAt // ignore: cast_nullable_to_non_nullable - as int?, - requestCounts: freezed == requestCounts - ? _value.requestCounts - : requestCounts // ignore: cast_nullable_to_non_nullable - as BatchRequestCounts?, - metadata: freezed == metadata - ? 
_value.metadata - : metadata // ignore: cast_nullable_to_non_nullable - as dynamic, )); } } /// @nodoc @JsonSerializable() -class _$BatchImpl extends _Batch { - const _$BatchImpl( - {required this.id, - required this.object, - required this.endpoint, - @JsonKey(includeIfNull: false) this.errors, - @JsonKey(name: 'input_file_id') required this.inputFileId, - @JsonKey(name: 'completion_window') required this.completionWindow, - required this.status, - @JsonKey(name: 'output_file_id', includeIfNull: false) this.outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) this.errorFileId, - @JsonKey(name: 'created_at') required this.createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) this.inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) this.finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) this.completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) this.failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) this.expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) this.cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) this.cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) this.requestCounts, - @JsonKey(includeIfNull: false) this.metadata}) +class _$DeleteVectorStoreFileResponseImpl + extends _DeleteVectorStoreFileResponse { + const _$DeleteVectorStoreFileResponseImpl( + {required this.id, required this.deleted, required this.object}) : super._(); - factory _$BatchImpl.fromJson(Map json) => - _$$BatchImplFromJson(json); + factory _$DeleteVectorStoreFileResponseImpl.fromJson( + Map json) => + _$$DeleteVectorStoreFileResponseImplFromJson(json); - /// No Description + /// The ID of the deleted vector store file. @override final String id; - /// The object type, which is always `batch`. + /// Whether the vector store file was deleted. @override - final BatchObject object; + final bool deleted; - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + /// The object type, which is always `vector_store.file.deleted`. @override - final BatchEndpoint endpoint; - - /// No Description - @override - @JsonKey(includeIfNull: false) - final BatchErrors? errors; - - /// The ID of the input file for the batch. - @override - @JsonKey(name: 'input_file_id') - final String inputFileId; - - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @override - @JsonKey(name: 'completion_window') - final BatchCompletionWindow completionWindow; - - /// The current status of the batch. - @override - final BatchStatus status; - - /// The ID of the file containing the outputs of successfully executed requests. - @override - @JsonKey(name: 'output_file_id', includeIfNull: false) - final String? outputFileId; - - /// The ID of the file containing the outputs of requests with errors. - @override - @JsonKey(name: 'error_file_id', includeIfNull: false) - final String? errorFileId; - - /// The Unix timestamp (in seconds) for when the batch was created. - @override - @JsonKey(name: 'created_at') - final int createdAt; - - /// The Unix timestamp (in seconds) for when the batch started processing. 
- @override - @JsonKey(name: 'in_progress_at', includeIfNull: false) - final int? inProgressAt; - - /// The Unix timestamp (in seconds) for when the batch will expire. - @override - @JsonKey(name: 'expires_at', includeIfNull: false) - final int? expiresAt; - - /// The Unix timestamp (in seconds) for when the batch started finalizing. - @override - @JsonKey(name: 'finalizing_at', includeIfNull: false) - final int? finalizingAt; - - /// The Unix timestamp (in seconds) for when the batch was completed. - @override - @JsonKey(name: 'completed_at', includeIfNull: false) - final int? completedAt; - - /// The Unix timestamp (in seconds) for when the batch failed. - @override - @JsonKey(name: 'failed_at', includeIfNull: false) - final int? failedAt; - - /// The Unix timestamp (in seconds) for when the batch expired. - @override - @JsonKey(name: 'expired_at', includeIfNull: false) - final int? expiredAt; - - /// The Unix timestamp (in seconds) for when the batch started cancelling. - @override - @JsonKey(name: 'cancelling_at', includeIfNull: false) - final int? cancellingAt; - - /// The Unix timestamp (in seconds) for when the batch was cancelled. - @override - @JsonKey(name: 'cancelled_at', includeIfNull: false) - final int? cancelledAt; - - /// The request counts for different statuses within the batch. - @override - @JsonKey(name: 'request_counts', includeIfNull: false) - final BatchRequestCounts? requestCounts; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @override - @JsonKey(includeIfNull: false) - final dynamic metadata; + final String object; @override String toString() { - return 'Batch(id: $id, object: $object, endpoint: $endpoint, errors: $errors, inputFileId: $inputFileId, completionWindow: $completionWindow, status: $status, outputFileId: $outputFileId, errorFileId: $errorFileId, createdAt: $createdAt, inProgressAt: $inProgressAt, expiresAt: $expiresAt, finalizingAt: $finalizingAt, completedAt: $completedAt, failedAt: $failedAt, expiredAt: $expiredAt, cancellingAt: $cancellingAt, cancelledAt: $cancelledAt, requestCounts: $requestCounts, metadata: $metadata)'; + return 'DeleteVectorStoreFileResponse(id: $id, deleted: $deleted, object: $object)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$BatchImpl && + other is _$DeleteVectorStoreFileResponseImpl && (identical(other.id, id) || other.id == id) && - (identical(other.object, object) || other.object == object) && - (identical(other.endpoint, endpoint) || - other.endpoint == endpoint) && - (identical(other.errors, errors) || other.errors == errors) && - (identical(other.inputFileId, inputFileId) || - other.inputFileId == inputFileId) && - (identical(other.completionWindow, completionWindow) || - other.completionWindow == completionWindow) && - (identical(other.status, status) || other.status == status) && - (identical(other.outputFileId, outputFileId) || - other.outputFileId == outputFileId) && - (identical(other.errorFileId, errorFileId) || - other.errorFileId == errorFileId) && - (identical(other.createdAt, createdAt) || - other.createdAt == createdAt) && - (identical(other.inProgressAt, inProgressAt) || - other.inProgressAt == inProgressAt) && - (identical(other.expiresAt, expiresAt) || - other.expiresAt == expiresAt) && - 
(identical(other.finalizingAt, finalizingAt) || - other.finalizingAt == finalizingAt) && - (identical(other.completedAt, completedAt) || - other.completedAt == completedAt) && - (identical(other.failedAt, failedAt) || - other.failedAt == failedAt) && - (identical(other.expiredAt, expiredAt) || - other.expiredAt == expiredAt) && - (identical(other.cancellingAt, cancellingAt) || - other.cancellingAt == cancellingAt) && - (identical(other.cancelledAt, cancelledAt) || - other.cancelledAt == cancelledAt) && - (identical(other.requestCounts, requestCounts) || - other.requestCounts == requestCounts) && - const DeepCollectionEquality().equals(other.metadata, metadata)); + (identical(other.deleted, deleted) || other.deleted == deleted) && + (identical(other.object, object) || other.object == object)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hashAll([ - runtimeType, - id, - object, - endpoint, - errors, - inputFileId, - completionWindow, - status, - outputFileId, - errorFileId, - createdAt, - inProgressAt, - expiresAt, - finalizingAt, - completedAt, - failedAt, - expiredAt, - cancellingAt, - cancelledAt, - requestCounts, - const DeepCollectionEquality().hash(metadata) - ]); + int get hashCode => Object.hash(runtimeType, id, deleted, object); - @JsonKey(ignore: true) + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$BatchImplCopyWith<_$BatchImpl> get copyWith => - __$$BatchImplCopyWithImpl<_$BatchImpl>(this, _$identity); + _$$DeleteVectorStoreFileResponseImplCopyWith< + _$DeleteVectorStoreFileResponseImpl> + get copyWith => __$$DeleteVectorStoreFileResponseImplCopyWithImpl< + _$DeleteVectorStoreFileResponseImpl>(this, _$identity); @override Map toJson() { - return _$$BatchImplToJson( + return _$$DeleteVectorStoreFileResponseImplToJson( this, ); } } -abstract class _Batch extends Batch { - const factory _Batch( +abstract class _DeleteVectorStoreFileResponse + extends DeleteVectorStoreFileResponse { + const factory _DeleteVectorStoreFileResponse( {required final String id, - required final BatchObject object, - required final BatchEndpoint endpoint, - @JsonKey(includeIfNull: false) final BatchErrors? errors, - @JsonKey(name: 'input_file_id') required final String inputFileId, - @JsonKey(name: 'completion_window') - required final BatchCompletionWindow completionWindow, - required final BatchStatus status, - @JsonKey(name: 'output_file_id', includeIfNull: false) - final String? outputFileId, - @JsonKey(name: 'error_file_id', includeIfNull: false) - final String? errorFileId, - @JsonKey(name: 'created_at') required final int createdAt, - @JsonKey(name: 'in_progress_at', includeIfNull: false) - final int? inProgressAt, - @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, - @JsonKey(name: 'finalizing_at', includeIfNull: false) - final int? finalizingAt, - @JsonKey(name: 'completed_at', includeIfNull: false) - final int? completedAt, - @JsonKey(name: 'failed_at', includeIfNull: false) final int? failedAt, - @JsonKey(name: 'expired_at', includeIfNull: false) final int? expiredAt, - @JsonKey(name: 'cancelling_at', includeIfNull: false) - final int? cancellingAt, - @JsonKey(name: 'cancelled_at', includeIfNull: false) - final int? cancelledAt, - @JsonKey(name: 'request_counts', includeIfNull: false) - final BatchRequestCounts? 
requestCounts, - @JsonKey(includeIfNull: false) final dynamic metadata}) = _$BatchImpl; - const _Batch._() : super._(); + required final bool deleted, + required final String object}) = _$DeleteVectorStoreFileResponseImpl; + const _DeleteVectorStoreFileResponse._() : super._(); - factory _Batch.fromJson(Map json) = _$BatchImpl.fromJson; + factory _DeleteVectorStoreFileResponse.fromJson(Map json) = + _$DeleteVectorStoreFileResponseImpl.fromJson; + /// The ID of the deleted vector store file. @override - - /// No Description String get id; - @override - - /// The object type, which is always `batch`. - BatchObject get object; - @override - - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. - BatchEndpoint get endpoint; - @override - /// No Description - @JsonKey(includeIfNull: false) - BatchErrors? get errors; + /// Whether the vector store file was deleted. @override + bool get deleted; - /// The ID of the input file for the batch. - @JsonKey(name: 'input_file_id') - String get inputFileId; + /// The object type, which is always `vector_store.file.deleted`. @override + String get object; - /// The time frame within which the batch should be processed. Currently only `24h` is supported. - @JsonKey(name: 'completion_window') - BatchCompletionWindow get completionWindow; + /// Create a copy of DeleteVectorStoreFileResponse + /// with the given fields replaced by the non-null parameter values. @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$DeleteVectorStoreFileResponseImplCopyWith< + _$DeleteVectorStoreFileResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} - /// The current status of the batch. - BatchStatus get status; - @override +VectorStoreFileBatchObject _$VectorStoreFileBatchObjectFromJson( + Map json) { + return _VectorStoreFileBatchObject.fromJson(json); +} - /// The ID of the file containing the outputs of successfully executed requests. - @JsonKey(name: 'output_file_id', includeIfNull: false) - String? get outputFileId; - @override +/// @nodoc +mixin _$VectorStoreFileBatchObject { + /// The identifier, which can be referenced in API endpoints. + String get id => throw _privateConstructorUsedError; - /// The ID of the file containing the outputs of requests with errors. - @JsonKey(name: 'error_file_id', includeIfNull: false) - String? get errorFileId; - @override + /// The object type, which is always `vector_store.file_batch`. + String get object => throw _privateConstructorUsedError; - /// The Unix timestamp (in seconds) for when the batch was created. + /// The Unix timestamp (in seconds) for when the vector store files batch was created. @JsonKey(name: 'created_at') - int get createdAt; - @override - - /// The Unix timestamp (in seconds) for when the batch started processing. - @JsonKey(name: 'in_progress_at', includeIfNull: false) - int? get inProgressAt; - @override - - /// The Unix timestamp (in seconds) for when the batch will expire. - @JsonKey(name: 'expires_at', includeIfNull: false) - int? get expiresAt; - @override - - /// The Unix timestamp (in seconds) for when the batch started finalizing. - @JsonKey(name: 'finalizing_at', includeIfNull: false) - int? get finalizingAt; - @override - - /// The Unix timestamp (in seconds) for when the batch was completed. 
- @JsonKey(name: 'completed_at', includeIfNull: false) - int? get completedAt; - @override - - /// The Unix timestamp (in seconds) for when the batch failed. - @JsonKey(name: 'failed_at', includeIfNull: false) - int? get failedAt; - @override - - /// The Unix timestamp (in seconds) for when the batch expired. - @JsonKey(name: 'expired_at', includeIfNull: false) - int? get expiredAt; - @override - - /// The Unix timestamp (in seconds) for when the batch started cancelling. - @JsonKey(name: 'cancelling_at', includeIfNull: false) - int? get cancellingAt; - @override - - /// The Unix timestamp (in seconds) for when the batch was cancelled. - @JsonKey(name: 'cancelled_at', includeIfNull: false) - int? get cancelledAt; - @override + int get createdAt => throw _privateConstructorUsedError; - /// The request counts for different statuses within the batch. - @JsonKey(name: 'request_counts', includeIfNull: false) - BatchRequestCounts? get requestCounts; - @override + /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @JsonKey(name: 'vector_store_id') + String get vectorStoreId => throw _privateConstructorUsedError; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - dynamic get metadata; - @override - @JsonKey(ignore: true) - _$$BatchImplCopyWith<_$BatchImpl> get copyWith => + /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + VectorStoreFileBatchObjectStatus get status => throw _privateConstructorUsedError; -} - -BatchErrors _$BatchErrorsFromJson(Map json) { - return _BatchErrors.fromJson(json); -} - -/// @nodoc -mixin _$BatchErrors { - /// The object type, which is always `list`. - @JsonKey(includeIfNull: false) - String? get object => throw _privateConstructorUsedError; - /// No Description - @JsonKey(includeIfNull: false) - List? get data => throw _privateConstructorUsedError; + /// The number of files per status. + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts get fileCounts => + throw _privateConstructorUsedError; + /// Serializes this VectorStoreFileBatchObject to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BatchErrorsCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreFileBatchObjectCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $BatchErrorsCopyWith<$Res> { - factory $BatchErrorsCopyWith( - BatchErrors value, $Res Function(BatchErrors) then) = - _$BatchErrorsCopyWithImpl<$Res, BatchErrors>; +abstract class $VectorStoreFileBatchObjectCopyWith<$Res> { + factory $VectorStoreFileBatchObjectCopyWith(VectorStoreFileBatchObject value, + $Res Function(VectorStoreFileBatchObject) then) = + _$VectorStoreFileBatchObjectCopyWithImpl<$Res, + VectorStoreFileBatchObject>; @useResult $Res call( - {@JsonKey(includeIfNull: false) String? object, - @JsonKey(includeIfNull: false) List? 
data}); + {String id, + String object, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'vector_store_id') String vectorStoreId, + VectorStoreFileBatchObjectStatus status, + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts fileCounts}); + + $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; } /// @nodoc -class _$BatchErrorsCopyWithImpl<$Res, $Val extends BatchErrors> - implements $BatchErrorsCopyWith<$Res> { - _$BatchErrorsCopyWithImpl(this._value, this._then); +class _$VectorStoreFileBatchObjectCopyWithImpl<$Res, + $Val extends VectorStoreFileBatchObject> + implements $VectorStoreFileBatchObjectCopyWith<$Res> { + _$VectorStoreFileBatchObjectCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? object = freezed, - Object? data = freezed, + Object? id = null, + Object? object = null, + Object? createdAt = null, + Object? vectorStoreId = null, + Object? status = null, + Object? fileCounts = null, }) { return _then(_value.copyWith( - object: freezed == object + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String?, - data: freezed == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List?, + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + vectorStoreId: null == vectorStoreId + ? _value.vectorStoreId + : vectorStoreId // ignore: cast_nullable_to_non_nullable + as String, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreFileBatchObjectStatus, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreFileBatchObjectFileCounts, ) as $Val); } + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts { + return $VectorStoreFileBatchObjectFileCountsCopyWith<$Res>( + _value.fileCounts, (value) { + return _then(_value.copyWith(fileCounts: value) as $Val); + }); + } } /// @nodoc -abstract class _$$BatchErrorsImplCopyWith<$Res> - implements $BatchErrorsCopyWith<$Res> { - factory _$$BatchErrorsImplCopyWith( - _$BatchErrorsImpl value, $Res Function(_$BatchErrorsImpl) then) = - __$$BatchErrorsImplCopyWithImpl<$Res>; +abstract class _$$VectorStoreFileBatchObjectImplCopyWith<$Res> + implements $VectorStoreFileBatchObjectCopyWith<$Res> { + factory _$$VectorStoreFileBatchObjectImplCopyWith( + _$VectorStoreFileBatchObjectImpl value, + $Res Function(_$VectorStoreFileBatchObjectImpl) then) = + __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {@JsonKey(includeIfNull: false) String? object, - @JsonKey(includeIfNull: false) List? 
data}); + {String id, + String object, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'vector_store_id') String vectorStoreId, + VectorStoreFileBatchObjectStatus status, + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts fileCounts}); + + @override + $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> get fileCounts; } /// @nodoc -class __$$BatchErrorsImplCopyWithImpl<$Res> - extends _$BatchErrorsCopyWithImpl<$Res, _$BatchErrorsImpl> - implements _$$BatchErrorsImplCopyWith<$Res> { - __$$BatchErrorsImplCopyWithImpl( - _$BatchErrorsImpl _value, $Res Function(_$BatchErrorsImpl) _then) +class __$$VectorStoreFileBatchObjectImplCopyWithImpl<$Res> + extends _$VectorStoreFileBatchObjectCopyWithImpl<$Res, + _$VectorStoreFileBatchObjectImpl> + implements _$$VectorStoreFileBatchObjectImplCopyWith<$Res> { + __$$VectorStoreFileBatchObjectImplCopyWithImpl( + _$VectorStoreFileBatchObjectImpl _value, + $Res Function(_$VectorStoreFileBatchObjectImpl) _then) : super(_value, _then); + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? object = freezed, - Object? data = freezed, + Object? id = null, + Object? object = null, + Object? createdAt = null, + Object? vectorStoreId = null, + Object? status = null, + Object? fileCounts = null, }) { - return _then(_$BatchErrorsImpl( - object: freezed == object + return _then(_$VectorStoreFileBatchObjectImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + object: null == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String?, - data: freezed == data - ? _value._data - : data // ignore: cast_nullable_to_non_nullable - as List?, + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + vectorStoreId: null == vectorStoreId + ? _value.vectorStoreId + : vectorStoreId // ignore: cast_nullable_to_non_nullable + as String, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as VectorStoreFileBatchObjectStatus, + fileCounts: null == fileCounts + ? _value.fileCounts + : fileCounts // ignore: cast_nullable_to_non_nullable + as VectorStoreFileBatchObjectFileCounts, )); } } /// @nodoc @JsonSerializable() -class _$BatchErrorsImpl extends _BatchErrors { - const _$BatchErrorsImpl( - {@JsonKey(includeIfNull: false) this.object, - @JsonKey(includeIfNull: false) final List? data}) - : _data = data, - super._(); +class _$VectorStoreFileBatchObjectImpl extends _VectorStoreFileBatchObject { + const _$VectorStoreFileBatchObjectImpl( + {required this.id, + required this.object, + @JsonKey(name: 'created_at') required this.createdAt, + @JsonKey(name: 'vector_store_id') required this.vectorStoreId, + required this.status, + @JsonKey(name: 'file_counts') required this.fileCounts}) + : super._(); + + factory _$VectorStoreFileBatchObjectImpl.fromJson( + Map json) => + _$$VectorStoreFileBatchObjectImplFromJson(json); + + /// The identifier, which can be referenced in API endpoints. + @override + final String id; + + /// The object type, which is always `vector_store.file_batch`. + @override + final String object; + + /// The Unix timestamp (in seconds) for when the vector store files batch was created. 
+ @override + @JsonKey(name: 'created_at') + final int createdAt; + + /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. + @override + @JsonKey(name: 'vector_store_id') + final String vectorStoreId; + + /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + @override + final VectorStoreFileBatchObjectStatus status; + + /// The number of files per status. + @override + @JsonKey(name: 'file_counts') + final VectorStoreFileBatchObjectFileCounts fileCounts; + + @override + String toString() { + return 'VectorStoreFileBatchObject(id: $id, object: $object, createdAt: $createdAt, vectorStoreId: $vectorStoreId, status: $status, fileCounts: $fileCounts)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$VectorStoreFileBatchObjectImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.object, object) || other.object == object) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.vectorStoreId, vectorStoreId) || + other.vectorStoreId == vectorStoreId) && + (identical(other.status, status) || other.status == status) && + (identical(other.fileCounts, fileCounts) || + other.fileCounts == fileCounts)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, id, object, createdAt, vectorStoreId, status, fileCounts); + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> + get copyWith => __$$VectorStoreFileBatchObjectImplCopyWithImpl< + _$VectorStoreFileBatchObjectImpl>(this, _$identity); + + @override + Map toJson() { + return _$$VectorStoreFileBatchObjectImplToJson( + this, + ); + } +} + +abstract class _VectorStoreFileBatchObject extends VectorStoreFileBatchObject { + const factory _VectorStoreFileBatchObject( + {required final String id, + required final String object, + @JsonKey(name: 'created_at') required final int createdAt, + @JsonKey(name: 'vector_store_id') required final String vectorStoreId, + required final VectorStoreFileBatchObjectStatus status, + @JsonKey(name: 'file_counts') + required final VectorStoreFileBatchObjectFileCounts fileCounts}) = + _$VectorStoreFileBatchObjectImpl; + const _VectorStoreFileBatchObject._() : super._(); + + factory _VectorStoreFileBatchObject.fromJson(Map json) = + _$VectorStoreFileBatchObjectImpl.fromJson; + + /// The identifier, which can be referenced in API endpoints. + @override + String get id; + + /// The object type, which is always `vector_store.file_batch`. + @override + String get object; + + /// The Unix timestamp (in seconds) for when the vector store files batch was created. + @override + @JsonKey(name: 'created_at') + int get createdAt; + + /// The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. 
+ @override + @JsonKey(name: 'vector_store_id') + String get vectorStoreId; + + /// The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + @override + VectorStoreFileBatchObjectStatus get status; + + /// The number of files per status. + @override + @JsonKey(name: 'file_counts') + VectorStoreFileBatchObjectFileCounts get fileCounts; + + /// Create a copy of VectorStoreFileBatchObject + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreFileBatchObjectImplCopyWith<_$VectorStoreFileBatchObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +VectorStoreFileBatchObjectFileCounts + _$VectorStoreFileBatchObjectFileCountsFromJson(Map json) { + return _VectorStoreFileBatchObjectFileCounts.fromJson(json); +} + +/// @nodoc +mixin _$VectorStoreFileBatchObjectFileCounts { + /// The number of files that are currently being processed. + @JsonKey(name: 'in_progress') + int get inProgress => throw _privateConstructorUsedError; + + /// The number of files that have been processed. + int get completed => throw _privateConstructorUsedError; + + /// The number of files that have failed to process. + int get failed => throw _privateConstructorUsedError; + + /// The number of files that where cancelled. + int get cancelled => throw _privateConstructorUsedError; + + /// The total number of files. + int get total => throw _privateConstructorUsedError; + + /// Serializes this VectorStoreFileBatchObjectFileCounts to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $VectorStoreFileBatchObjectFileCountsCopyWith< + VectorStoreFileBatchObjectFileCounts> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { + factory $VectorStoreFileBatchObjectFileCountsCopyWith( + VectorStoreFileBatchObjectFileCounts value, + $Res Function(VectorStoreFileBatchObjectFileCounts) then) = + _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, + VectorStoreFileBatchObjectFileCounts>; + @useResult + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); +} + +/// @nodoc +class _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, + $Val extends VectorStoreFileBatchObjectFileCounts> + implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { + _$VectorStoreFileBatchObjectFileCountsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, + }) { + return _then(_value.copyWith( + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? 
_value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? _value.total + : total // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> + implements $VectorStoreFileBatchObjectFileCountsCopyWith<$Res> { + factory _$$VectorStoreFileBatchObjectFileCountsImplCopyWith( + _$VectorStoreFileBatchObjectFileCountsImpl value, + $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) then) = + __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'in_progress') int inProgress, + int completed, + int failed, + int cancelled, + int total}); +} + +/// @nodoc +class __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl<$Res> + extends _$VectorStoreFileBatchObjectFileCountsCopyWithImpl<$Res, + _$VectorStoreFileBatchObjectFileCountsImpl> + implements _$$VectorStoreFileBatchObjectFileCountsImplCopyWith<$Res> { + __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl( + _$VectorStoreFileBatchObjectFileCountsImpl _value, + $Res Function(_$VectorStoreFileBatchObjectFileCountsImpl) _then) + : super(_value, _then); + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inProgress = null, + Object? completed = null, + Object? failed = null, + Object? cancelled = null, + Object? total = null, + }) { + return _then(_$VectorStoreFileBatchObjectFileCountsImpl( + inProgress: null == inProgress + ? _value.inProgress + : inProgress // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + cancelled: null == cancelled + ? _value.cancelled + : cancelled // ignore: cast_nullable_to_non_nullable + as int, + total: null == total + ? _value.total + : total // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$VectorStoreFileBatchObjectFileCountsImpl + extends _VectorStoreFileBatchObjectFileCounts { + const _$VectorStoreFileBatchObjectFileCountsImpl( + {@JsonKey(name: 'in_progress') required this.inProgress, + required this.completed, + required this.failed, + required this.cancelled, + required this.total}) + : super._(); + + factory _$VectorStoreFileBatchObjectFileCountsImpl.fromJson( + Map json) => + _$$VectorStoreFileBatchObjectFileCountsImplFromJson(json); + + /// The number of files that are currently being processed. + @override + @JsonKey(name: 'in_progress') + final int inProgress; + + /// The number of files that have been processed. + @override + final int completed; + + /// The number of files that have failed to process. + @override + final int failed; + + /// The number of files that where cancelled. + @override + final int cancelled; + + /// The total number of files. 
+ @override + final int total; + + @override + String toString() { + return 'VectorStoreFileBatchObjectFileCounts(inProgress: $inProgress, completed: $completed, failed: $failed, cancelled: $cancelled, total: $total)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$VectorStoreFileBatchObjectFileCountsImpl && + (identical(other.inProgress, inProgress) || + other.inProgress == inProgress) && + (identical(other.completed, completed) || + other.completed == completed) && + (identical(other.failed, failed) || other.failed == failed) && + (identical(other.cancelled, cancelled) || + other.cancelled == cancelled) && + (identical(other.total, total) || other.total == total)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => + Object.hash(runtimeType, inProgress, completed, failed, cancelled, total); + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< + _$VectorStoreFileBatchObjectFileCountsImpl> + get copyWith => __$$VectorStoreFileBatchObjectFileCountsImplCopyWithImpl< + _$VectorStoreFileBatchObjectFileCountsImpl>(this, _$identity); + + @override + Map<String, dynamic> toJson() { + return _$$VectorStoreFileBatchObjectFileCountsImplToJson( + this, + ); + } +} + +abstract class _VectorStoreFileBatchObjectFileCounts + extends VectorStoreFileBatchObjectFileCounts { + const factory _VectorStoreFileBatchObjectFileCounts( + {@JsonKey(name: 'in_progress') required final int inProgress, + required final int completed, + required final int failed, + required final int cancelled, + required final int total}) = _$VectorStoreFileBatchObjectFileCountsImpl; + const _VectorStoreFileBatchObjectFileCounts._() : super._(); + + factory _VectorStoreFileBatchObjectFileCounts.fromJson( + Map<String, dynamic> json) = + _$VectorStoreFileBatchObjectFileCountsImpl.fromJson; + + /// The number of files that are currently being processed. + @override + @JsonKey(name: 'in_progress') + int get inProgress; + + /// The number of files that have been processed. + @override + int get completed; + + /// The number of files that have failed to process. + @override + int get failed; + + /// The number of files that were cancelled. + @override + int get cancelled; + + /// The total number of files. + @override + int get total; + + /// Create a copy of VectorStoreFileBatchObjectFileCounts + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$VectorStoreFileBatchObjectFileCountsImplCopyWith< + _$VectorStoreFileBatchObjectFileCountsImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CreateVectorStoreFileBatchRequest _$CreateVectorStoreFileBatchRequestFromJson( + Map<String, dynamic> json) { + return _CreateVectorStoreFileBatchRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateVectorStoreFileBatchRequest { + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @JsonKey(name: 'file_ids') + List<String> get fileIds => throw _privateConstructorUsedError; + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.
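// --- Editorial sketch (not part of the generated diff) --------------------
// Illustrates how the VectorStoreFileBatchObject and
// VectorStoreFileBatchObjectFileCounts classes defined above might be used.
// Assumes both classes are exported by `package:openai_dart/openai_dart.dart`;
// the `batch` value would normally come from a vector-store file-batch API call.
void reportFileBatchProgress(VectorStoreFileBatchObject batch) {
  final counts = batch.fileCounts;
  // Files that are completed, failed, or cancelled are no longer pending.
  final done = counts.completed + counts.failed + counts.cancelled;
  print('Batch ${batch.id} (${batch.status}): '
      '$done/${counts.total} files finished, '
      '${counts.inProgress} still in progress.');
}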
+ /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy => + throw _privateConstructorUsedError; + + /// Serializes this CreateVectorStoreFileBatchRequest to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CreateVectorStoreFileBatchRequestCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateVectorStoreFileBatchRequestCopyWith<$Res> { + factory $CreateVectorStoreFileBatchRequestCopyWith( + CreateVectorStoreFileBatchRequest value, + $Res Function(CreateVectorStoreFileBatchRequest) then) = + _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, + CreateVectorStoreFileBatchRequest>; + @useResult + $Res call( + {@JsonKey(name: 'file_ids') List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy; +} + +/// @nodoc +class _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, + $Val extends CreateVectorStoreFileBatchRequest> + implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { + _$CreateVectorStoreFileBatchRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileIds = null, + Object? chunkingStrategy = freezed, + }) { + return _then(_value.copyWith( + fileIds: null == fileIds + ? _value.fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, + ) as $Val); + } + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChunkingStrategyRequestParamCopyWith<$Res>? get chunkingStrategy { + if (_value.chunkingStrategy == null) { + return null; + } + + return $ChunkingStrategyRequestParamCopyWith<$Res>(_value.chunkingStrategy!, + (value) { + return _then(_value.copyWith(chunkingStrategy: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> + implements $CreateVectorStoreFileBatchRequestCopyWith<$Res> { + factory _$$CreateVectorStoreFileBatchRequestImplCopyWith( + _$CreateVectorStoreFileBatchRequestImpl value, + $Res Function(_$CreateVectorStoreFileBatchRequestImpl) then) = + __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'file_ids') List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy}); + + @override + $ChunkingStrategyRequestParamCopyWith<$Res>? 
get chunkingStrategy; +} + +/// @nodoc +class __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl<$Res> + extends _$CreateVectorStoreFileBatchRequestCopyWithImpl<$Res, + _$CreateVectorStoreFileBatchRequestImpl> + implements _$$CreateVectorStoreFileBatchRequestImplCopyWith<$Res> { + __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl( + _$CreateVectorStoreFileBatchRequestImpl _value, + $Res Function(_$CreateVectorStoreFileBatchRequestImpl) _then) + : super(_value, _then); + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? fileIds = null, + Object? chunkingStrategy = freezed, + }) { + return _then(_$CreateVectorStoreFileBatchRequestImpl( + fileIds: null == fileIds + ? _value._fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List, + chunkingStrategy: freezed == chunkingStrategy + ? _value.chunkingStrategy + : chunkingStrategy // ignore: cast_nullable_to_non_nullable + as ChunkingStrategyRequestParam?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateVectorStoreFileBatchRequestImpl + extends _CreateVectorStoreFileBatchRequest { + const _$CreateVectorStoreFileBatchRequestImpl( + {@JsonKey(name: 'file_ids') required final List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + this.chunkingStrategy}) + : _fileIds = fileIds, + super._(); + + factory _$CreateVectorStoreFileBatchRequestImpl.fromJson( + Map json) => + _$$CreateVectorStoreFileBatchRequestImplFromJson(json); + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + final List _fileIds; + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @override + @JsonKey(name: 'file_ids') + List get fileIds { + if (_fileIds is EqualUnmodifiableListView) return _fileIds; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_fileIds); + } + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy; + + @override + String toString() { + return 'CreateVectorStoreFileBatchRequest(fileIds: $fileIds, chunkingStrategy: $chunkingStrategy)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateVectorStoreFileBatchRequestImpl && + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + (identical(other.chunkingStrategy, chunkingStrategy) || + other.chunkingStrategy == chunkingStrategy)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, + const DeepCollectionEquality().hash(_fileIds), chunkingStrategy); + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$CreateVectorStoreFileBatchRequestImplCopyWith< + _$CreateVectorStoreFileBatchRequestImpl> + get copyWith => __$$CreateVectorStoreFileBatchRequestImplCopyWithImpl< + _$CreateVectorStoreFileBatchRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateVectorStoreFileBatchRequestImplToJson( + this, + ); + } +} + +abstract class _CreateVectorStoreFileBatchRequest + extends CreateVectorStoreFileBatchRequest { + const factory _CreateVectorStoreFileBatchRequest( + {@JsonKey(name: 'file_ids') required final List fileIds, + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + final ChunkingStrategyRequestParam? chunkingStrategy}) = + _$CreateVectorStoreFileBatchRequestImpl; + const _CreateVectorStoreFileBatchRequest._() : super._(); + + factory _CreateVectorStoreFileBatchRequest.fromJson( + Map json) = + _$CreateVectorStoreFileBatchRequestImpl.fromJson; + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + @override + @JsonKey(name: 'file_ids') + List get fileIds; + + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @override + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? get chunkingStrategy; + + /// Create a copy of CreateVectorStoreFileBatchRequest + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateVectorStoreFileBatchRequestImplCopyWith< + _$CreateVectorStoreFileBatchRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Error _$ErrorFromJson(Map json) { + return _Error.fromJson(json); +} + +/// @nodoc +mixin _$Error { + /// The error code. + String? get code => throw _privateConstructorUsedError; + + /// A human-readable description of the error. + String get message => throw _privateConstructorUsedError; + + /// The parameter in the request that caused the error. + String? get param => throw _privateConstructorUsedError; + + /// The type of error. + String get type => throw _privateConstructorUsedError; + + /// Serializes this Error to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ErrorCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ErrorCopyWith<$Res> { + factory $ErrorCopyWith(Error value, $Res Function(Error) then) = + _$ErrorCopyWithImpl<$Res, Error>; + @useResult + $Res call({String? code, String message, String? param, String type}); +} + +/// @nodoc +class _$ErrorCopyWithImpl<$Res, $Val extends Error> + implements $ErrorCopyWith<$Res> { + _$ErrorCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = freezed, + Object? message = null, + Object? param = freezed, + Object? type = null, + }) { + return _then(_value.copyWith( + code: freezed == code + ? 
_value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ErrorImplCopyWith<$Res> implements $ErrorCopyWith<$Res> { + factory _$$ErrorImplCopyWith( + _$ErrorImpl value, $Res Function(_$ErrorImpl) then) = + __$$ErrorImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String? code, String message, String? param, String type}); +} + +/// @nodoc +class __$$ErrorImplCopyWithImpl<$Res> + extends _$ErrorCopyWithImpl<$Res, _$ErrorImpl> + implements _$$ErrorImplCopyWith<$Res> { + __$$ErrorImplCopyWithImpl( + _$ErrorImpl _value, $Res Function(_$ErrorImpl) _then) + : super(_value, _then); + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = freezed, + Object? message = null, + Object? param = freezed, + Object? type = null, + }) { + return _then(_$ErrorImpl( + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ErrorImpl extends _Error { + const _$ErrorImpl( + {required this.code, + required this.message, + required this.param, + required this.type}) + : super._(); + + factory _$ErrorImpl.fromJson(Map json) => + _$$ErrorImplFromJson(json); + + /// The error code. + @override + final String? code; + + /// A human-readable description of the error. + @override + final String message; + + /// The parameter in the request that caused the error. + @override + final String? param; + + /// The type of error. + @override + final String type; + + @override + String toString() { + return 'Error(code: $code, message: $message, param: $param, type: $type)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ErrorImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message) && + (identical(other.param, param) || other.param == param) && + (identical(other.type, type) || other.type == type)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, code, message, param, type); + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => + __$$ErrorImplCopyWithImpl<_$ErrorImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ErrorImplToJson( + this, + ); + } +} + +abstract class _Error extends Error { + const factory _Error( + {required final String? code, + required final String message, + required final String? 
param, + required final String type}) = _$ErrorImpl; + const _Error._() : super._(); + + factory _Error.fromJson(Map<String, dynamic> json) = _$ErrorImpl.fromJson; + + /// The error code. + @override + String? get code; + + /// A human-readable description of the error. + @override + String get message; + + /// The parameter in the request that caused the error. + @override + String? get param; + + /// The type of error. + @override + String get type; + + /// Create a copy of Error + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ErrorImplCopyWith<_$ErrorImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateBatchRequest _$CreateBatchRequestFromJson(Map<String, dynamic> json) { + return _CreateBatchRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateBatchRequest { + /// The ID of an uploaded file that contains requests for the new batch. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + @JsonKey(name: 'input_file_id') + String get inputFileId => throw _privateConstructorUsedError; + + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + BatchEndpoint get endpoint => throw _privateConstructorUsedError; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow => + throw _privateConstructorUsedError; + + /// Optional custom metadata for the batch. + @JsonKey(includeIfNull: false) + Map? get metadata => throw _privateConstructorUsedError; + + /// Serializes this CreateBatchRequest to a JSON map. + Map<String, dynamic> toJson() => throw _privateConstructorUsedError; + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $CreateBatchRequestCopyWith<CreateBatchRequest> get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateBatchRequestCopyWith<$Res> { + factory $CreateBatchRequestCopyWith( + CreateBatchRequest value, $Res Function(CreateBatchRequest) then) = + _$CreateBatchRequestCopyWithImpl<$Res, CreateBatchRequest>; + @useResult + $Res call( + {@JsonKey(name: 'input_file_id') String inputFileId, + BatchEndpoint endpoint, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + @JsonKey(includeIfNull: false) Map? metadata}); +} + +/// @nodoc +class _$CreateBatchRequestCopyWithImpl<$Res, $Val extends CreateBatchRequest> + implements $CreateBatchRequestCopyWith<$Res> { + _$CreateBatchRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputFileId = null, + Object? endpoint = null, + Object? completionWindow = null, + Object?
metadata = freezed, + }) { + return _then(_value.copyWith( + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable + as String, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as Map?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateBatchRequestImplCopyWith<$Res> + implements $CreateBatchRequestCopyWith<$Res> { + factory _$$CreateBatchRequestImplCopyWith(_$CreateBatchRequestImpl value, + $Res Function(_$CreateBatchRequestImpl) then) = + __$$CreateBatchRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'input_file_id') String inputFileId, + BatchEndpoint endpoint, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + @JsonKey(includeIfNull: false) Map? metadata}); +} + +/// @nodoc +class __$$CreateBatchRequestImplCopyWithImpl<$Res> + extends _$CreateBatchRequestCopyWithImpl<$Res, _$CreateBatchRequestImpl> + implements _$$CreateBatchRequestImplCopyWith<$Res> { + __$$CreateBatchRequestImplCopyWithImpl(_$CreateBatchRequestImpl _value, + $Res Function(_$CreateBatchRequestImpl) _then) + : super(_value, _then); + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? inputFileId = null, + Object? endpoint = null, + Object? completionWindow = null, + Object? metadata = freezed, + }) { + return _then(_$CreateBatchRequestImpl( + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable + as String, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + metadata: freezed == metadata + ? _value._metadata + : metadata // ignore: cast_nullable_to_non_nullable + as Map?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateBatchRequestImpl extends _CreateBatchRequest { + const _$CreateBatchRequestImpl( + {@JsonKey(name: 'input_file_id') required this.inputFileId, + required this.endpoint, + @JsonKey(name: 'completion_window') required this.completionWindow, + @JsonKey(includeIfNull: false) final Map? metadata}) + : _metadata = metadata, + super._(); + + factory _$CreateBatchRequestImpl.fromJson(Map json) => + _$$CreateBatchRequestImplFromJson(json); + + /// The ID of an uploaded file that contains requests for the new batch. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + @override + @JsonKey(name: 'input_file_id') + final String inputFileId; + + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. 
Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + @override + final BatchEndpoint endpoint; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override + @JsonKey(name: 'completion_window') + final BatchCompletionWindow completionWindow; + + /// Optional custom metadata for the batch. + final Map? _metadata; + + /// Optional custom metadata for the batch. + @override + @JsonKey(includeIfNull: false) + Map? get metadata { + final value = _metadata; + if (value == null) return null; + if (_metadata is EqualUnmodifiableMapView) return _metadata; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(value); + } + + @override + String toString() { + return 'CreateBatchRequest(inputFileId: $inputFileId, endpoint: $endpoint, completionWindow: $completionWindow, metadata: $metadata)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateBatchRequestImpl && + (identical(other.inputFileId, inputFileId) || + other.inputFileId == inputFileId) && + (identical(other.endpoint, endpoint) || + other.endpoint == endpoint) && + (identical(other.completionWindow, completionWindow) || + other.completionWindow == completionWindow) && + const DeepCollectionEquality().equals(other._metadata, _metadata)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, inputFileId, endpoint, + completionWindow, const DeepCollectionEquality().hash(_metadata)); + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => + __$$CreateBatchRequestImplCopyWithImpl<_$CreateBatchRequestImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateBatchRequestImplToJson( + this, + ); + } +} + +abstract class _CreateBatchRequest extends CreateBatchRequest { + const factory _CreateBatchRequest( + {@JsonKey(name: 'input_file_id') required final String inputFileId, + required final BatchEndpoint endpoint, + @JsonKey(name: 'completion_window') + required final BatchCompletionWindow completionWindow, + @JsonKey(includeIfNull: false) final Map? metadata}) = + _$CreateBatchRequestImpl; + const _CreateBatchRequest._() : super._(); + + factory _CreateBatchRequest.fromJson(Map json) = + _$CreateBatchRequestImpl.fromJson; + + /// The ID of an uploaded file that contains requests for the new batch. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + @override + @JsonKey(name: 'input_file_id') + String get inputFileId; + + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
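// --- Editorial sketch (not part of the generated diff) --------------------
// Shows a JSON round trip for the CreateBatchRequest defined above, using the
// generated fromJson/toJson helpers. The snake_case keys come from the
// @JsonKey annotations in this file; the file ID is a made-up placeholder, and
// the endpoint / completion-window strings follow the wire values quoted in
// the doc comments (`/v1/chat/completions`, `24h`).
void createBatchRequestRoundTrip() {
  final request = CreateBatchRequest.fromJson({
    'input_file_id': 'file-abc123', // hypothetical uploaded JSONL file ID
    'endpoint': '/v1/chat/completions',
    'completion_window': '24h',
  });
  // toJson() re-emits the same snake_case keys declared via @JsonKey.
  assert(request.toJson()['input_file_id'] == 'file-abc123');
}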
+ @override + BatchEndpoint get endpoint; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow; + + /// Optional custom metadata for the batch. + @override + @JsonKey(includeIfNull: false) + Map? get metadata; + + /// Create a copy of CreateBatchRequest + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$CreateBatchRequestImplCopyWith<_$CreateBatchRequestImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Batch _$BatchFromJson(Map json) { + return _Batch.fromJson(json); +} + +/// @nodoc +mixin _$Batch { + /// No Description + String get id => throw _privateConstructorUsedError; + + /// The object type, which is always `batch`. + BatchObject get object => throw _privateConstructorUsedError; + + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + BatchEndpoint get endpoint => throw _privateConstructorUsedError; + + /// No Description + @JsonKey(includeIfNull: false) + BatchErrors? get errors => throw _privateConstructorUsedError; + + /// The ID of the input file for the batch. + @JsonKey(name: 'input_file_id') + String get inputFileId => throw _privateConstructorUsedError; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow => + throw _privateConstructorUsedError; + + /// The current status of the batch. + BatchStatus get status => throw _privateConstructorUsedError; + + /// The ID of the file containing the outputs of successfully executed requests. + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? get outputFileId => throw _privateConstructorUsedError; + + /// The ID of the file containing the outputs of requests with errors. + @JsonKey(name: 'error_file_id', includeIfNull: false) + String? get errorFileId => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch started processing. + @JsonKey(name: 'in_progress_at', includeIfNull: false) + int? get inProgressAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch will expire. + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch started finalizing. + @JsonKey(name: 'finalizing_at', includeIfNull: false) + int? get finalizingAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch was completed. + @JsonKey(name: 'completed_at', includeIfNull: false) + int? get completedAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch failed. + @JsonKey(name: 'failed_at', includeIfNull: false) + int? get failedAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch expired. + @JsonKey(name: 'expired_at', includeIfNull: false) + int? 
get expiredAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch started cancelling. + @JsonKey(name: 'cancelling_at', includeIfNull: false) + int? get cancellingAt => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the batch was cancelled. + @JsonKey(name: 'cancelled_at', includeIfNull: false) + int? get cancelledAt => throw _privateConstructorUsedError; + + /// The request counts for different statuses within the batch. + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? get requestCounts => throw _privateConstructorUsedError; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + dynamic get metadata => throw _privateConstructorUsedError; + + /// Serializes this Batch to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $BatchCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BatchCopyWith<$Res> { + factory $BatchCopyWith(Batch value, $Res Function(Batch) then) = + _$BatchCopyWithImpl<$Res, Batch>; + @useResult + $Res call( + {String id, + BatchObject object, + BatchEndpoint endpoint, + @JsonKey(includeIfNull: false) BatchErrors? errors, + @JsonKey(name: 'input_file_id') String inputFileId, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + BatchStatus status, + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? requestCounts, + @JsonKey(includeIfNull: false) dynamic metadata}); + + $BatchErrorsCopyWith<$Res>? get errors; + $BatchRequestCountsCopyWith<$Res>? get requestCounts; +} + +/// @nodoc +class _$BatchCopyWithImpl<$Res, $Val extends Batch> + implements $BatchCopyWith<$Res> { + _$BatchCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? object = null, + Object? endpoint = null, + Object? errors = freezed, + Object? inputFileId = null, + Object? completionWindow = null, + Object? status = null, + Object? outputFileId = freezed, + Object? errorFileId = freezed, + Object? createdAt = null, + Object? 
inProgressAt = freezed, + Object? expiresAt = freezed, + Object? finalizingAt = freezed, + Object? completedAt = freezed, + Object? failedAt = freezed, + Object? expiredAt = freezed, + Object? cancellingAt = freezed, + Object? cancelledAt = freezed, + Object? requestCounts = freezed, + Object? metadata = freezed, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as BatchObject, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + errors: freezed == errors + ? _value.errors + : errors // ignore: cast_nullable_to_non_nullable + as BatchErrors?, + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable + as String, + completionWindow: null == completionWindow + ? _value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as BatchStatus, + outputFileId: freezed == outputFileId + ? _value.outputFileId + : outputFileId // ignore: cast_nullable_to_non_nullable + as String?, + errorFileId: freezed == errorFileId + ? _value.errorFileId + : errorFileId // ignore: cast_nullable_to_non_nullable + as String?, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + inProgressAt: freezed == inProgressAt + ? _value.inProgressAt + : inProgressAt // ignore: cast_nullable_to_non_nullable + as int?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + finalizingAt: freezed == finalizingAt + ? _value.finalizingAt + : finalizingAt // ignore: cast_nullable_to_non_nullable + as int?, + completedAt: freezed == completedAt + ? _value.completedAt + : completedAt // ignore: cast_nullable_to_non_nullable + as int?, + failedAt: freezed == failedAt + ? _value.failedAt + : failedAt // ignore: cast_nullable_to_non_nullable + as int?, + expiredAt: freezed == expiredAt + ? _value.expiredAt + : expiredAt // ignore: cast_nullable_to_non_nullable + as int?, + cancellingAt: freezed == cancellingAt + ? _value.cancellingAt + : cancellingAt // ignore: cast_nullable_to_non_nullable + as int?, + cancelledAt: freezed == cancelledAt + ? _value.cancelledAt + : cancelledAt // ignore: cast_nullable_to_non_nullable + as int?, + requestCounts: freezed == requestCounts + ? _value.requestCounts + : requestCounts // ignore: cast_nullable_to_non_nullable + as BatchRequestCounts?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, + ) as $Val); + } + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $BatchErrorsCopyWith<$Res>? get errors { + if (_value.errors == null) { + return null; + } + + return $BatchErrorsCopyWith<$Res>(_value.errors!, (value) { + return _then(_value.copyWith(errors: value) as $Val); + }); + } + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $BatchRequestCountsCopyWith<$Res>? 
get requestCounts { + if (_value.requestCounts == null) { + return null; + } + + return $BatchRequestCountsCopyWith<$Res>(_value.requestCounts!, (value) { + return _then(_value.copyWith(requestCounts: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$BatchImplCopyWith<$Res> implements $BatchCopyWith<$Res> { + factory _$$BatchImplCopyWith( + _$BatchImpl value, $Res Function(_$BatchImpl) then) = + __$$BatchImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + BatchObject object, + BatchEndpoint endpoint, + @JsonKey(includeIfNull: false) BatchErrors? errors, + @JsonKey(name: 'input_file_id') String inputFileId, + @JsonKey(name: 'completion_window') + BatchCompletionWindow completionWindow, + BatchStatus status, + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) String? errorFileId, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'in_progress_at', includeIfNull: false) int? inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) int? expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) int? finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) int? completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) int? failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) int? expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) int? cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) int? cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? requestCounts, + @JsonKey(includeIfNull: false) dynamic metadata}); + + @override + $BatchErrorsCopyWith<$Res>? get errors; + @override + $BatchRequestCountsCopyWith<$Res>? get requestCounts; +} + +/// @nodoc +class __$$BatchImplCopyWithImpl<$Res> + extends _$BatchCopyWithImpl<$Res, _$BatchImpl> + implements _$$BatchImplCopyWith<$Res> { + __$$BatchImplCopyWithImpl( + _$BatchImpl _value, $Res Function(_$BatchImpl) _then) + : super(_value, _then); + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? object = null, + Object? endpoint = null, + Object? errors = freezed, + Object? inputFileId = null, + Object? completionWindow = null, + Object? status = null, + Object? outputFileId = freezed, + Object? errorFileId = freezed, + Object? createdAt = null, + Object? inProgressAt = freezed, + Object? expiresAt = freezed, + Object? finalizingAt = freezed, + Object? completedAt = freezed, + Object? failedAt = freezed, + Object? expiredAt = freezed, + Object? cancellingAt = freezed, + Object? cancelledAt = freezed, + Object? requestCounts = freezed, + Object? metadata = freezed, + }) { + return _then(_$BatchImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as BatchObject, + endpoint: null == endpoint + ? _value.endpoint + : endpoint // ignore: cast_nullable_to_non_nullable + as BatchEndpoint, + errors: freezed == errors + ? _value.errors + : errors // ignore: cast_nullable_to_non_nullable + as BatchErrors?, + inputFileId: null == inputFileId + ? _value.inputFileId + : inputFileId // ignore: cast_nullable_to_non_nullable + as String, + completionWindow: null == completionWindow + ? 
_value.completionWindow + : completionWindow // ignore: cast_nullable_to_non_nullable + as BatchCompletionWindow, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as BatchStatus, + outputFileId: freezed == outputFileId + ? _value.outputFileId + : outputFileId // ignore: cast_nullable_to_non_nullable + as String?, + errorFileId: freezed == errorFileId + ? _value.errorFileId + : errorFileId // ignore: cast_nullable_to_non_nullable + as String?, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + inProgressAt: freezed == inProgressAt + ? _value.inProgressAt + : inProgressAt // ignore: cast_nullable_to_non_nullable + as int?, + expiresAt: freezed == expiresAt + ? _value.expiresAt + : expiresAt // ignore: cast_nullable_to_non_nullable + as int?, + finalizingAt: freezed == finalizingAt + ? _value.finalizingAt + : finalizingAt // ignore: cast_nullable_to_non_nullable + as int?, + completedAt: freezed == completedAt + ? _value.completedAt + : completedAt // ignore: cast_nullable_to_non_nullable + as int?, + failedAt: freezed == failedAt + ? _value.failedAt + : failedAt // ignore: cast_nullable_to_non_nullable + as int?, + expiredAt: freezed == expiredAt + ? _value.expiredAt + : expiredAt // ignore: cast_nullable_to_non_nullable + as int?, + cancellingAt: freezed == cancellingAt + ? _value.cancellingAt + : cancellingAt // ignore: cast_nullable_to_non_nullable + as int?, + cancelledAt: freezed == cancelledAt + ? _value.cancelledAt + : cancelledAt // ignore: cast_nullable_to_non_nullable + as int?, + requestCounts: freezed == requestCounts + ? _value.requestCounts + : requestCounts // ignore: cast_nullable_to_non_nullable + as BatchRequestCounts?, + metadata: freezed == metadata + ? _value.metadata + : metadata // ignore: cast_nullable_to_non_nullable + as dynamic, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$BatchImpl extends _Batch { + const _$BatchImpl( + {required this.id, + required this.object, + required this.endpoint, + @JsonKey(includeIfNull: false) this.errors, + @JsonKey(name: 'input_file_id') required this.inputFileId, + @JsonKey(name: 'completion_window') required this.completionWindow, + required this.status, + @JsonKey(name: 'output_file_id', includeIfNull: false) this.outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) this.errorFileId, + @JsonKey(name: 'created_at') required this.createdAt, + @JsonKey(name: 'in_progress_at', includeIfNull: false) this.inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) this.expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) this.finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) this.completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) this.failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) this.expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) this.cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) this.cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) this.requestCounts, + @JsonKey(includeIfNull: false) this.metadata}) + : super._(); + + factory _$BatchImpl.fromJson(Map json) => + _$$BatchImplFromJson(json); + + /// No Description + @override + final String id; + + /// The object type, which is always `batch`. + @override + final BatchObject object; + + /// The endpoint to be used for all requests in the batch. 
Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + @override + final BatchEndpoint endpoint; + + /// No Description + @override + @JsonKey(includeIfNull: false) + final BatchErrors? errors; + + /// The ID of the input file for the batch. + @override + @JsonKey(name: 'input_file_id') + final String inputFileId; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override + @JsonKey(name: 'completion_window') + final BatchCompletionWindow completionWindow; + + /// The current status of the batch. + @override + final BatchStatus status; + + /// The ID of the file containing the outputs of successfully executed requests. + @override + @JsonKey(name: 'output_file_id', includeIfNull: false) + final String? outputFileId; + + /// The ID of the file containing the outputs of requests with errors. + @override + @JsonKey(name: 'error_file_id', includeIfNull: false) + final String? errorFileId; + + /// The Unix timestamp (in seconds) for when the batch was created. + @override + @JsonKey(name: 'created_at') + final int createdAt; + + /// The Unix timestamp (in seconds) for when the batch started processing. + @override + @JsonKey(name: 'in_progress_at', includeIfNull: false) + final int? inProgressAt; + + /// The Unix timestamp (in seconds) for when the batch will expire. + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + final int? expiresAt; + + /// The Unix timestamp (in seconds) for when the batch started finalizing. + @override + @JsonKey(name: 'finalizing_at', includeIfNull: false) + final int? finalizingAt; + + /// The Unix timestamp (in seconds) for when the batch was completed. + @override + @JsonKey(name: 'completed_at', includeIfNull: false) + final int? completedAt; + + /// The Unix timestamp (in seconds) for when the batch failed. + @override + @JsonKey(name: 'failed_at', includeIfNull: false) + final int? failedAt; + + /// The Unix timestamp (in seconds) for when the batch expired. + @override + @JsonKey(name: 'expired_at', includeIfNull: false) + final int? expiredAt; + + /// The Unix timestamp (in seconds) for when the batch started cancelling. + @override + @JsonKey(name: 'cancelling_at', includeIfNull: false) + final int? cancellingAt; + + /// The Unix timestamp (in seconds) for when the batch was cancelled. + @override + @JsonKey(name: 'cancelled_at', includeIfNull: false) + final int? cancelledAt; + + /// The request counts for different statuses within the batch. + @override + @JsonKey(name: 'request_counts', includeIfNull: false) + final BatchRequestCounts? requestCounts; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maximum of 512 characters long. 
+ @override + @JsonKey(includeIfNull: false) + final dynamic metadata; + + @override + String toString() { + return 'Batch(id: $id, object: $object, endpoint: $endpoint, errors: $errors, inputFileId: $inputFileId, completionWindow: $completionWindow, status: $status, outputFileId: $outputFileId, errorFileId: $errorFileId, createdAt: $createdAt, inProgressAt: $inProgressAt, expiresAt: $expiresAt, finalizingAt: $finalizingAt, completedAt: $completedAt, failedAt: $failedAt, expiredAt: $expiredAt, cancellingAt: $cancellingAt, cancelledAt: $cancelledAt, requestCounts: $requestCounts, metadata: $metadata)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$BatchImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.object, object) || other.object == object) && + (identical(other.endpoint, endpoint) || + other.endpoint == endpoint) && + (identical(other.errors, errors) || other.errors == errors) && + (identical(other.inputFileId, inputFileId) || + other.inputFileId == inputFileId) && + (identical(other.completionWindow, completionWindow) || + other.completionWindow == completionWindow) && + (identical(other.status, status) || other.status == status) && + (identical(other.outputFileId, outputFileId) || + other.outputFileId == outputFileId) && + (identical(other.errorFileId, errorFileId) || + other.errorFileId == errorFileId) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.inProgressAt, inProgressAt) || + other.inProgressAt == inProgressAt) && + (identical(other.expiresAt, expiresAt) || + other.expiresAt == expiresAt) && + (identical(other.finalizingAt, finalizingAt) || + other.finalizingAt == finalizingAt) && + (identical(other.completedAt, completedAt) || + other.completedAt == completedAt) && + (identical(other.failedAt, failedAt) || + other.failedAt == failedAt) && + (identical(other.expiredAt, expiredAt) || + other.expiredAt == expiredAt) && + (identical(other.cancellingAt, cancellingAt) || + other.cancellingAt == cancellingAt) && + (identical(other.cancelledAt, cancelledAt) || + other.cancelledAt == cancelledAt) && + (identical(other.requestCounts, requestCounts) || + other.requestCounts == requestCounts) && + const DeepCollectionEquality().equals(other.metadata, metadata)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hashAll([ + runtimeType, + id, + object, + endpoint, + errors, + inputFileId, + completionWindow, + status, + outputFileId, + errorFileId, + createdAt, + inProgressAt, + expiresAt, + finalizingAt, + completedAt, + failedAt, + expiredAt, + cancellingAt, + cancelledAt, + requestCounts, + const DeepCollectionEquality().hash(metadata) + ]); + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$BatchImplCopyWith<_$BatchImpl> get copyWith => + __$$BatchImplCopyWithImpl<_$BatchImpl>(this, _$identity); + + @override + Map toJson() { + return _$$BatchImplToJson( + this, + ); + } +} + +abstract class _Batch extends Batch { + const factory _Batch( + {required final String id, + required final BatchObject object, + required final BatchEndpoint endpoint, + @JsonKey(includeIfNull: false) final BatchErrors? 
errors, + @JsonKey(name: 'input_file_id') required final String inputFileId, + @JsonKey(name: 'completion_window') + required final BatchCompletionWindow completionWindow, + required final BatchStatus status, + @JsonKey(name: 'output_file_id', includeIfNull: false) + final String? outputFileId, + @JsonKey(name: 'error_file_id', includeIfNull: false) + final String? errorFileId, + @JsonKey(name: 'created_at') required final int createdAt, + @JsonKey(name: 'in_progress_at', includeIfNull: false) + final int? inProgressAt, + @JsonKey(name: 'expires_at', includeIfNull: false) final int? expiresAt, + @JsonKey(name: 'finalizing_at', includeIfNull: false) + final int? finalizingAt, + @JsonKey(name: 'completed_at', includeIfNull: false) + final int? completedAt, + @JsonKey(name: 'failed_at', includeIfNull: false) final int? failedAt, + @JsonKey(name: 'expired_at', includeIfNull: false) final int? expiredAt, + @JsonKey(name: 'cancelling_at', includeIfNull: false) + final int? cancellingAt, + @JsonKey(name: 'cancelled_at', includeIfNull: false) + final int? cancelledAt, + @JsonKey(name: 'request_counts', includeIfNull: false) + final BatchRequestCounts? requestCounts, + @JsonKey(includeIfNull: false) final dynamic metadata}) = _$BatchImpl; + const _Batch._() : super._(); + + factory _Batch.fromJson(Map json) = _$BatchImpl.fromJson; + + /// No Description + @override + String get id; + + /// The object type, which is always `batch`. + @override + BatchObject get object; + + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + @override + BatchEndpoint get endpoint; + + /// No Description + @override + @JsonKey(includeIfNull: false) + BatchErrors? get errors; + + /// The ID of the input file for the batch. + @override + @JsonKey(name: 'input_file_id') + String get inputFileId; + + /// The time frame within which the batch should be processed. Currently only `24h` is supported. + @override + @JsonKey(name: 'completion_window') + BatchCompletionWindow get completionWindow; + + /// The current status of the batch. + @override + BatchStatus get status; + + /// The ID of the file containing the outputs of successfully executed requests. + @override + @JsonKey(name: 'output_file_id', includeIfNull: false) + String? get outputFileId; + + /// The ID of the file containing the outputs of requests with errors. + @override + @JsonKey(name: 'error_file_id', includeIfNull: false) + String? get errorFileId; + + /// The Unix timestamp (in seconds) for when the batch was created. + @override + @JsonKey(name: 'created_at') + int get createdAt; + + /// The Unix timestamp (in seconds) for when the batch started processing. + @override + @JsonKey(name: 'in_progress_at', includeIfNull: false) + int? get inProgressAt; + + /// The Unix timestamp (in seconds) for when the batch will expire. + @override + @JsonKey(name: 'expires_at', includeIfNull: false) + int? get expiresAt; + + /// The Unix timestamp (in seconds) for when the batch started finalizing. + @override + @JsonKey(name: 'finalizing_at', includeIfNull: false) + int? get finalizingAt; + + /// The Unix timestamp (in seconds) for when the batch was completed. + @override + @JsonKey(name: 'completed_at', includeIfNull: false) + int? get completedAt; + + /// The Unix timestamp (in seconds) for when the batch failed. 
+ @override + @JsonKey(name: 'failed_at', includeIfNull: false) + int? get failedAt; + + /// The Unix timestamp (in seconds) for when the batch expired. + @override + @JsonKey(name: 'expired_at', includeIfNull: false) + int? get expiredAt; + + /// The Unix timestamp (in seconds) for when the batch started cancelling. + @override + @JsonKey(name: 'cancelling_at', includeIfNull: false) + int? get cancellingAt; + + /// The Unix timestamp (in seconds) for when the batch was cancelled. + @override + @JsonKey(name: 'cancelled_at', includeIfNull: false) + int? get cancelledAt; + + /// The request counts for different statuses within the batch. + @override + @JsonKey(name: 'request_counts', includeIfNull: false) + BatchRequestCounts? get requestCounts; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maximum of 512 characters long. + @override + @JsonKey(includeIfNull: false) + dynamic get metadata; + + /// Create a copy of Batch + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$BatchImplCopyWith<_$BatchImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BatchErrors _$BatchErrorsFromJson(Map json) { + return _BatchErrors.fromJson(json); +} + +/// @nodoc +mixin _$BatchErrors { + /// The object type, which is always `list`. + @JsonKey(includeIfNull: false) + String? get object => throw _privateConstructorUsedError; + + /// No Description + @JsonKey(includeIfNull: false) + List? get data => throw _privateConstructorUsedError; + + /// Serializes this BatchErrors to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $BatchErrorsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BatchErrorsCopyWith<$Res> { + factory $BatchErrorsCopyWith( + BatchErrors value, $Res Function(BatchErrors) then) = + _$BatchErrorsCopyWithImpl<$Res, BatchErrors>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? object, + @JsonKey(includeIfNull: false) List? data}); +} + +/// @nodoc +class _$BatchErrorsCopyWithImpl<$Res, $Val extends BatchErrors> + implements $BatchErrorsCopyWith<$Res> { + _$BatchErrorsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? object = freezed, + Object? data = freezed, + }) { + return _then(_value.copyWith( + object: freezed == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String?, + data: freezed == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$BatchErrorsImplCopyWith<$Res> + implements $BatchErrorsCopyWith<$Res> { + factory _$$BatchErrorsImplCopyWith( + _$BatchErrorsImpl value, $Res Function(_$BatchErrorsImpl) then) = + __$$BatchErrorsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? 
object, + @JsonKey(includeIfNull: false) List? data}); +} + +/// @nodoc +class __$$BatchErrorsImplCopyWithImpl<$Res> + extends _$BatchErrorsCopyWithImpl<$Res, _$BatchErrorsImpl> + implements _$$BatchErrorsImplCopyWith<$Res> { + __$$BatchErrorsImplCopyWithImpl( + _$BatchErrorsImpl _value, $Res Function(_$BatchErrorsImpl) _then) + : super(_value, _then); + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? object = freezed, + Object? data = freezed, + }) { + return _then(_$BatchErrorsImpl( + object: freezed == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String?, + data: freezed == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$BatchErrorsImpl extends _BatchErrors { + const _$BatchErrorsImpl( + {@JsonKey(includeIfNull: false) this.object, + @JsonKey(includeIfNull: false) final List? data}) + : _data = data, + super._(); + + factory _$BatchErrorsImpl.fromJson(Map json) => + _$$BatchErrorsImplFromJson(json); + + /// The object type, which is always `list`. + @override + @JsonKey(includeIfNull: false) + final String? object; + + /// No Description + final List? _data; + + /// No Description + @override + @JsonKey(includeIfNull: false) + List? get data { + final value = _data; + if (value == null) return null; + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'BatchErrors(object: $object, data: $data)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$BatchErrorsImpl && + (identical(other.object, object) || other.object == object) && + const DeepCollectionEquality().equals(other._data, _data)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, object, const DeepCollectionEquality().hash(_data)); + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => + __$$BatchErrorsImplCopyWithImpl<_$BatchErrorsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$BatchErrorsImplToJson( + this, + ); + } +} + +abstract class _BatchErrors extends BatchErrors { + const factory _BatchErrors( + {@JsonKey(includeIfNull: false) final String? object, + @JsonKey(includeIfNull: false) + final List? data}) = _$BatchErrorsImpl; + const _BatchErrors._() : super._(); + + factory _BatchErrors.fromJson(Map json) = + _$BatchErrorsImpl.fromJson; + + /// The object type, which is always `list`. + @override + @JsonKey(includeIfNull: false) + String? get object; + + /// No Description + @override + @JsonKey(includeIfNull: false) + List? get data; + + /// Create a copy of BatchErrors + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BatchRequestCounts _$BatchRequestCountsFromJson(Map json) { + return _BatchRequestCounts.fromJson(json); +} + +/// @nodoc +mixin _$BatchRequestCounts { + /// Total number of requests in the batch. + int get total => throw _privateConstructorUsedError; + + /// Number of requests that have been completed successfully. + int get completed => throw _privateConstructorUsedError; + + /// Number of requests that have failed. + int get failed => throw _privateConstructorUsedError; + + /// Serializes this BatchRequestCounts to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $BatchRequestCountsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BatchRequestCountsCopyWith<$Res> { + factory $BatchRequestCountsCopyWith( + BatchRequestCounts value, $Res Function(BatchRequestCounts) then) = + _$BatchRequestCountsCopyWithImpl<$Res, BatchRequestCounts>; + @useResult + $Res call({int total, int completed, int failed}); +} + +/// @nodoc +class _$BatchRequestCountsCopyWithImpl<$Res, $Val extends BatchRequestCounts> + implements $BatchRequestCountsCopyWith<$Res> { + _$BatchRequestCountsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? total = null, + Object? completed = null, + Object? failed = null, + }) { + return _then(_value.copyWith( + total: null == total + ? _value.total + : total // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? _value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$BatchRequestCountsImplCopyWith<$Res> + implements $BatchRequestCountsCopyWith<$Res> { + factory _$$BatchRequestCountsImplCopyWith(_$BatchRequestCountsImpl value, + $Res Function(_$BatchRequestCountsImpl) then) = + __$$BatchRequestCountsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({int total, int completed, int failed}); +} + +/// @nodoc +class __$$BatchRequestCountsImplCopyWithImpl<$Res> + extends _$BatchRequestCountsCopyWithImpl<$Res, _$BatchRequestCountsImpl> + implements _$$BatchRequestCountsImplCopyWith<$Res> { + __$$BatchRequestCountsImplCopyWithImpl(_$BatchRequestCountsImpl _value, + $Res Function(_$BatchRequestCountsImpl) _then) + : super(_value, _then); + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? total = null, + Object? completed = null, + Object? failed = null, + }) { + return _then(_$BatchRequestCountsImpl( + total: null == total + ? _value.total + : total // ignore: cast_nullable_to_non_nullable + as int, + completed: null == completed + ? _value.completed + : completed // ignore: cast_nullable_to_non_nullable + as int, + failed: null == failed + ? 
_value.failed + : failed // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$BatchRequestCountsImpl extends _BatchRequestCounts { + const _$BatchRequestCountsImpl( + {required this.total, required this.completed, required this.failed}) + : super._(); + + factory _$BatchRequestCountsImpl.fromJson(Map json) => + _$$BatchRequestCountsImplFromJson(json); + + /// Total number of requests in the batch. + @override + final int total; + + /// Number of requests that have been completed successfully. + @override + final int completed; + + /// Number of requests that have failed. + @override + final int failed; + + @override + String toString() { + return 'BatchRequestCounts(total: $total, completed: $completed, failed: $failed)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$BatchRequestCountsImpl && + (identical(other.total, total) || other.total == total) && + (identical(other.completed, completed) || + other.completed == completed) && + (identical(other.failed, failed) || other.failed == failed)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, total, completed, failed); + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => + __$$BatchRequestCountsImplCopyWithImpl<_$BatchRequestCountsImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$BatchRequestCountsImplToJson( + this, + ); + } +} + +abstract class _BatchRequestCounts extends BatchRequestCounts { + const factory _BatchRequestCounts( + {required final int total, + required final int completed, + required final int failed}) = _$BatchRequestCountsImpl; + const _BatchRequestCounts._() : super._(); + + factory _BatchRequestCounts.fromJson(Map json) = + _$BatchRequestCountsImpl.fromJson; + + /// Total number of requests in the batch. + @override + int get total; + + /// Number of requests that have been completed successfully. + @override + int get completed; + + /// Number of requests that have failed. + @override + int get failed; + + /// Create a copy of BatchRequestCounts + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +BatchErrorsDataInner _$BatchErrorsDataInnerFromJson(Map json) { + return _BatchErrorsDataInner.fromJson(json); +} + +/// @nodoc +mixin _$BatchErrorsDataInner { + /// An error code identifying the error type. + @JsonKey(includeIfNull: false) + String? get code => throw _privateConstructorUsedError; + + /// A human-readable message providing more details about the error. + @JsonKey(includeIfNull: false) + String? get message => throw _privateConstructorUsedError; + + /// The name of the parameter that caused the error, if applicable. + @JsonKey(includeIfNull: false) + String? get param => throw _privateConstructorUsedError; + + /// The line number of the input file where the error occurred, if applicable. + @JsonKey(includeIfNull: false) + int? get line => throw _privateConstructorUsedError; + + /// Serializes this BatchErrorsDataInner to a JSON map. 
+ Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $BatchErrorsDataInnerCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $BatchErrorsDataInnerCopyWith<$Res> { + factory $BatchErrorsDataInnerCopyWith(BatchErrorsDataInner value, + $Res Function(BatchErrorsDataInner) then) = + _$BatchErrorsDataInnerCopyWithImpl<$Res, BatchErrorsDataInner>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? code, + @JsonKey(includeIfNull: false) String? message, + @JsonKey(includeIfNull: false) String? param, + @JsonKey(includeIfNull: false) int? line}); +} + +/// @nodoc +class _$BatchErrorsDataInnerCopyWithImpl<$Res, + $Val extends BatchErrorsDataInner> + implements $BatchErrorsDataInnerCopyWith<$Res> { + _$BatchErrorsDataInnerCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = freezed, + Object? message = freezed, + Object? param = freezed, + Object? line = freezed, + }) { + return _then(_value.copyWith( + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: freezed == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String?, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + line: freezed == line + ? _value.line + : line // ignore: cast_nullable_to_non_nullable + as int?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$BatchErrorsDataInnerImplCopyWith<$Res> + implements $BatchErrorsDataInnerCopyWith<$Res> { + factory _$$BatchErrorsDataInnerImplCopyWith(_$BatchErrorsDataInnerImpl value, + $Res Function(_$BatchErrorsDataInnerImpl) then) = + __$$BatchErrorsDataInnerImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? code, + @JsonKey(includeIfNull: false) String? message, + @JsonKey(includeIfNull: false) String? param, + @JsonKey(includeIfNull: false) int? line}); +} + +/// @nodoc +class __$$BatchErrorsDataInnerImplCopyWithImpl<$Res> + extends _$BatchErrorsDataInnerCopyWithImpl<$Res, _$BatchErrorsDataInnerImpl> + implements _$$BatchErrorsDataInnerImplCopyWith<$Res> { + __$$BatchErrorsDataInnerImplCopyWithImpl(_$BatchErrorsDataInnerImpl _value, + $Res Function(_$BatchErrorsDataInnerImpl) _then) + : super(_value, _then); + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = freezed, + Object? message = freezed, + Object? param = freezed, + Object? line = freezed, + }) { + return _then(_$BatchErrorsDataInnerImpl( + code: freezed == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String?, + message: freezed == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String?, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + line: freezed == line + ? 
_value.line + : line // ignore: cast_nullable_to_non_nullable + as int?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$BatchErrorsDataInnerImpl extends _BatchErrorsDataInner { + const _$BatchErrorsDataInnerImpl( + {@JsonKey(includeIfNull: false) this.code, + @JsonKey(includeIfNull: false) this.message, + @JsonKey(includeIfNull: false) this.param, + @JsonKey(includeIfNull: false) this.line}) + : super._(); + + factory _$BatchErrorsDataInnerImpl.fromJson(Map json) => + _$$BatchErrorsDataInnerImplFromJson(json); + + /// An error code identifying the error type. + @override + @JsonKey(includeIfNull: false) + final String? code; + + /// A human-readable message providing more details about the error. + @override + @JsonKey(includeIfNull: false) + final String? message; + + /// The name of the parameter that caused the error, if applicable. + @override + @JsonKey(includeIfNull: false) + final String? param; + + /// The line number of the input file where the error occurred, if applicable. + @override + @JsonKey(includeIfNull: false) + final int? line; + + @override + String toString() { + return 'BatchErrorsDataInner(code: $code, message: $message, param: $param, line: $line)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$BatchErrorsDataInnerImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message) && + (identical(other.param, param) || other.param == param) && + (identical(other.line, line) || other.line == line)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, code, message, param, line); + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> + get copyWith => + __$$BatchErrorsDataInnerImplCopyWithImpl<_$BatchErrorsDataInnerImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$BatchErrorsDataInnerImplToJson( + this, + ); + } +} + +abstract class _BatchErrorsDataInner extends BatchErrorsDataInner { + const factory _BatchErrorsDataInner( + {@JsonKey(includeIfNull: false) final String? code, + @JsonKey(includeIfNull: false) final String? message, + @JsonKey(includeIfNull: false) final String? param, + @JsonKey(includeIfNull: false) final int? line}) = + _$BatchErrorsDataInnerImpl; + const _BatchErrorsDataInner._() : super._(); + + factory _BatchErrorsDataInner.fromJson(Map json) = + _$BatchErrorsDataInnerImpl.fromJson; + + /// An error code identifying the error type. + @override + @JsonKey(includeIfNull: false) + String? get code; + + /// A human-readable message providing more details about the error. + @override + @JsonKey(includeIfNull: false) + String? get message; + + /// The name of the parameter that caused the error, if applicable. + @override + @JsonKey(includeIfNull: false) + String? get param; + + /// The line number of the input file where the error occurred, if applicable. + @override + @JsonKey(includeIfNull: false) + int? get line; + + /// Create a copy of BatchErrorsDataInner + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ListBatchesResponse _$ListBatchesResponseFromJson(Map json) { + return _ListBatchesResponse.fromJson(json); +} + +/// @nodoc +mixin _$ListBatchesResponse { + /// No Description + List get data => throw _privateConstructorUsedError; + + /// The ID of the first batch in the list. + @JsonKey(name: 'first_id', includeIfNull: false) + String? get firstId => throw _privateConstructorUsedError; + + /// The ID of the last batch in the list. + @JsonKey(name: 'last_id', includeIfNull: false) + String? get lastId => throw _privateConstructorUsedError; + + /// Whether there are more batches available. + @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; + + /// The object type, which is always `list`. + ListBatchesResponseObject get object => throw _privateConstructorUsedError; + + /// Serializes this ListBatchesResponse to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ListBatchesResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ListBatchesResponseCopyWith<$Res> { + factory $ListBatchesResponseCopyWith( + ListBatchesResponse value, $Res Function(ListBatchesResponse) then) = + _$ListBatchesResponseCopyWithImpl<$Res, ListBatchesResponse>; + @useResult + $Res call( + {List data, + @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, + @JsonKey(name: 'has_more') bool hasMore, + ListBatchesResponseObject object}); +} + +/// @nodoc +class _$ListBatchesResponseCopyWithImpl<$Res, $Val extends ListBatchesResponse> + implements $ListBatchesResponseCopyWith<$Res> { + _$ListBatchesResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, + Object? object = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as ListBatchesResponseObject, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ListBatchesResponseImplCopyWith<$Res> + implements $ListBatchesResponseCopyWith<$Res> { + factory _$$ListBatchesResponseImplCopyWith(_$ListBatchesResponseImpl value, + $Res Function(_$ListBatchesResponseImpl) then) = + __$$ListBatchesResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {List data, + @JsonKey(name: 'first_id', includeIfNull: false) String? 
firstId, + @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, + @JsonKey(name: 'has_more') bool hasMore, + ListBatchesResponseObject object}); +} + +/// @nodoc +class __$$ListBatchesResponseImplCopyWithImpl<$Res> + extends _$ListBatchesResponseCopyWithImpl<$Res, _$ListBatchesResponseImpl> + implements _$$ListBatchesResponseImplCopyWith<$Res> { + __$$ListBatchesResponseImplCopyWithImpl(_$ListBatchesResponseImpl _value, + $Res Function(_$ListBatchesResponseImpl) _then) + : super(_value, _then); + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, + Object? object = null, + }) { + return _then(_$ListBatchesResponseImpl( + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as ListBatchesResponseObject, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ListBatchesResponseImpl extends _ListBatchesResponse { + const _$ListBatchesResponseImpl( + {required final List data, + @JsonKey(name: 'first_id', includeIfNull: false) this.firstId, + @JsonKey(name: 'last_id', includeIfNull: false) this.lastId, + @JsonKey(name: 'has_more') required this.hasMore, + required this.object}) + : _data = data, + super._(); + + factory _$ListBatchesResponseImpl.fromJson(Map json) => + _$$ListBatchesResponseImplFromJson(json); + + /// No Description + final List _data; + + /// No Description + @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + /// The ID of the first batch in the list. + @override + @JsonKey(name: 'first_id', includeIfNull: false) + final String? firstId; + + /// The ID of the last batch in the list. + @override + @JsonKey(name: 'last_id', includeIfNull: false) + final String? lastId; + + /// Whether there are more batches available. + @override + @JsonKey(name: 'has_more') + final bool hasMore; + + /// The object type, which is always `list`. 
+ @override + final ListBatchesResponseObject object; + + @override + String toString() { + return 'ListBatchesResponse(data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore, object: $object)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ListBatchesResponseImpl && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.firstId, firstId) || other.firstId == firstId) && + (identical(other.lastId, lastId) || other.lastId == lastId) && + (identical(other.hasMore, hasMore) || other.hasMore == hasMore) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_data), + firstId, + lastId, + hasMore, + object); + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => + __$$ListBatchesResponseImplCopyWithImpl<_$ListBatchesResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ListBatchesResponseImplToJson( + this, + ); + } +} + +abstract class _ListBatchesResponse extends ListBatchesResponse { + const factory _ListBatchesResponse( + {required final List data, + @JsonKey(name: 'first_id', includeIfNull: false) final String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) final String? lastId, + @JsonKey(name: 'has_more') required final bool hasMore, + required final ListBatchesResponseObject + object}) = _$ListBatchesResponseImpl; + const _ListBatchesResponse._() : super._(); + + factory _ListBatchesResponse.fromJson(Map json) = + _$ListBatchesResponseImpl.fromJson; + + /// No Description + @override + List get data; + + /// The ID of the first batch in the list. + @override + @JsonKey(name: 'first_id', includeIfNull: false) + String? get firstId; + + /// The ID of the last batch in the list. + @override + @JsonKey(name: 'last_id', includeIfNull: false) + String? get lastId; + + /// Whether there are more batches available. + @override + @JsonKey(name: 'has_more') + bool get hasMore; + + /// The object type, which is always `list`. + @override + ListBatchesResponseObject get object; + + /// Create a copy of ListBatchesResponse + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ChatCompletionMessage _$ChatCompletionMessageFromJson( + Map json) { + switch (json['role']) { + case 'system': + return ChatCompletionSystemMessage.fromJson(json); + case 'user': + return ChatCompletionUserMessage.fromJson(json); + case 'assistant': + return ChatCompletionAssistantMessage.fromJson(json); + case 'tool': + return ChatCompletionToolMessage.fromJson(json); + case 'function': + return ChatCompletionFunctionMessage.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'role', 'ChatCompletionMessage', + 'Invalid union type "${json['role']}"!'); + } +} + +/// @nodoc +mixin _$ChatCompletionMessage { + /// The role of the messages author, in this case `system`. 
+ ChatCompletionMessageRole get role => throw _privateConstructorUsedError; + + /// The contents of the system message. + Object? get content => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? 
+ function, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionMessage to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ChatCompletionMessageCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionMessageCopyWith<$Res> { + factory $ChatCompletionMessageCopyWith(ChatCompletionMessage value, + $Res Function(ChatCompletionMessage) then) = + _$ChatCompletionMessageCopyWithImpl<$Res, ChatCompletionMessage>; + @useResult + $Res call({ChatCompletionMessageRole role}); +} + +/// @nodoc +class _$ChatCompletionMessageCopyWithImpl<$Res, + $Val extends ChatCompletionMessage> + implements $ChatCompletionMessageCopyWith<$Res> { + _$ChatCompletionMessageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? role = null, + }) { + return _then(_value.copyWith( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ChatCompletionSystemMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionSystemMessageImplCopyWith( + _$ChatCompletionSystemMessageImpl value, + $Res Function(_$ChatCompletionSystemMessageImpl) then) = + __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ChatCompletionMessageRole role, + String content, + @JsonKey(includeIfNull: false) String? 
name}); +} + +/// @nodoc +class __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionSystemMessageImpl> + implements _$$ChatCompletionSystemMessageImplCopyWith<$Res> { + __$$ChatCompletionSystemMessageImplCopyWithImpl( + _$ChatCompletionSystemMessageImpl _value, + $Res Function(_$ChatCompletionSystemMessageImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? role = null, + Object? content = null, + Object? name = freezed, + }) { + return _then(_$ChatCompletionSystemMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { + const _$ChatCompletionSystemMessageImpl( + {this.role = ChatCompletionMessageRole.system, + required this.content, + @JsonKey(includeIfNull: false) this.name}) + : super._(); + + factory _$ChatCompletionSystemMessageImpl.fromJson( + Map json) => + _$$ChatCompletionSystemMessageImplFromJson(json); + + /// The role of the messages author, in this case `system`. + @override + @JsonKey() + final ChatCompletionMessageRole role; + + /// The contents of the system message. + @override + final String content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @override + @JsonKey(includeIfNull: false) + final String? name; + + @override + String toString() { + return 'ChatCompletionMessage.system(role: $role, content: $content, name: $name)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionSystemMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, role, content, name); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> + get copyWith => __$$ChatCompletionSystemMessageImplCopyWithImpl< + _$ChatCompletionSystemMessageImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? 
name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, + }) { + return system(role, content, name); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + }) { + return system?.call(role, content, name); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + required TResult orElse(), + }) { + if (system != null) { + return system(role, content, name); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) { + return system(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? 
function, + }) { + return system?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, + required TResult orElse(), + }) { + if (system != null) { + return system(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionSystemMessageImplToJson( + this, + ); + } +} + +abstract class ChatCompletionSystemMessage extends ChatCompletionMessage { + const factory ChatCompletionSystemMessage( + {final ChatCompletionMessageRole role, + required final String content, + @JsonKey(includeIfNull: false) final String? name}) = + _$ChatCompletionSystemMessageImpl; + const ChatCompletionSystemMessage._() : super._(); + + factory ChatCompletionSystemMessage.fromJson(Map json) = + _$ChatCompletionSystemMessageImpl.fromJson; + + /// The role of the messages author, in this case `system`. + @override + ChatCompletionMessageRole get role; + + /// The contents of the system message. + @override + String get content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @JsonKey(includeIfNull: false) + String? get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ChatCompletionUserMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionUserMessageImplCopyWith( + _$ChatCompletionUserMessageImpl value, + $Res Function(_$ChatCompletionUserMessageImpl) then) = + __$$ChatCompletionUserMessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name}); + + $ChatCompletionUserMessageContentCopyWith<$Res> get content; +} + +/// @nodoc +class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionUserMessageImpl> + implements _$$ChatCompletionUserMessageImplCopyWith<$Res> { + __$$ChatCompletionUserMessageImplCopyWithImpl( + _$ChatCompletionUserMessageImpl _value, + $Res Function(_$ChatCompletionUserMessageImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? role = null, + Object? content = null, + Object? name = freezed, + }) { + return _then(_$ChatCompletionUserMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as ChatCompletionUserMessageContent, + name: freezed == name + ? 
_value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + )); + } + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChatCompletionUserMessageContentCopyWith<$Res> get content { + return $ChatCompletionUserMessageContentCopyWith<$Res>(_value.content, + (value) { + return _then(_value.copyWith(content: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { + const _$ChatCompletionUserMessageImpl( + {this.role = ChatCompletionMessageRole.user, + @_ChatCompletionUserMessageContentConverter() required this.content, + @JsonKey(includeIfNull: false) this.name}) + : super._(); + + factory _$ChatCompletionUserMessageImpl.fromJson(Map json) => + _$$ChatCompletionUserMessageImplFromJson(json); + + /// The role of the messages author, in this case `user`. + @override + @JsonKey() + final ChatCompletionMessageRole role; + + /// The contents of the user message. + @override + @_ChatCompletionUserMessageContentConverter() + final ChatCompletionUserMessageContent content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @override + @JsonKey(includeIfNull: false) + final String? name; + + @override + String toString() { + return 'ChatCompletionMessage.user(role: $role, content: $content, name: $name)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionUserMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, role, content, name); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> + get copyWith => __$$ChatCompletionUserMessageImplCopyWithImpl< + _$ChatCompletionUserMessageImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, + }) { + return user(role, content, name); + } + + @override + @optionalTypeArgs + TResult? 
whenOrNull({ + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + }) { + return user?.call(role, content, name); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + required TResult orElse(), + }) { + if (user != null) { + return user(role, content, name); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) { + return user(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, + }) { + return user?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? 
function, + required TResult orElse(), + }) { + if (user != null) { + return user(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionUserMessageImplToJson( + this, + ); + } +} + +abstract class ChatCompletionUserMessage extends ChatCompletionMessage { + const factory ChatCompletionUserMessage( + {final ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + required final ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) final String? name}) = + _$ChatCompletionUserMessageImpl; + const ChatCompletionUserMessage._() : super._(); + + factory ChatCompletionUserMessage.fromJson(Map json) = + _$ChatCompletionUserMessageImpl.fromJson; + + /// The role of the messages author, in this case `user`. + @override + ChatCompletionMessageRole get role; + + /// The contents of the user message. + @override + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent get content; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @JsonKey(includeIfNull: false) + String? get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ChatCompletionAssistantMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionAssistantMessageImplCopyWith( + _$ChatCompletionAssistantMessageImpl value, + $Res Function(_$ChatCompletionAssistantMessageImpl) then) = + __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall}); + + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall; +} + +/// @nodoc +class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionAssistantMessageImpl> + implements _$$ChatCompletionAssistantMessageImplCopyWith<$Res> { + __$$ChatCompletionAssistantMessageImplCopyWithImpl( + _$ChatCompletionAssistantMessageImpl _value, + $Res Function(_$ChatCompletionAssistantMessageImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? role = null, + Object? content = freezed, + Object? refusal = freezed, + Object? name = freezed, + Object? toolCalls = freezed, + Object? functionCall = freezed, + }) { + return _then(_$ChatCompletionAssistantMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String?, + refusal: freezed == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, + name: freezed == name + ? 
_value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + toolCalls: freezed == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, + functionCall: freezed == functionCall + ? _value.functionCall + : functionCall // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageFunctionCall?, + )); + } + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { + if (_value.functionCall == null) { + return null; + } + + return $ChatCompletionMessageFunctionCallCopyWith<$Res>( + _value.functionCall!, (value) { + return _then(_value.copyWith(functionCall: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionAssistantMessageImpl + extends ChatCompletionAssistantMessage { + const _$ChatCompletionAssistantMessageImpl( + {this.role = ChatCompletionMessageRole.assistant, + @JsonKey(includeIfNull: false) this.content, + @JsonKey(includeIfNull: false) this.refusal, + @JsonKey(includeIfNull: false) this.name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall}) + : _toolCalls = toolCalls, + super._(); + + factory _$ChatCompletionAssistantMessageImpl.fromJson( + Map json) => + _$$ChatCompletionAssistantMessageImplFromJson(json); + + /// The role of the messages author, in this case `assistant`. + @override + @JsonKey() + final ChatCompletionMessageRole role; + + /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + @override + @JsonKey(includeIfNull: false) + final String? content; + + /// The refusal message by the assistant. + @override + @JsonKey(includeIfNull: false) + final String? refusal; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @override + @JsonKey(includeIfNull: false) + final String? name; + + /// No Description + final List? _toolCalls; + + /// No Description + @override + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + @override + @JsonKey(name: 'function_call', includeIfNull: false) + final ChatCompletionMessageFunctionCall? 
functionCall; + + @override + String toString() { + return 'ChatCompletionMessage.assistant(role: $role, content: $content, refusal: $refusal, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionAssistantMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.refusal, refusal) || other.refusal == refusal) && + (identical(other.name, name) || other.name == name) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls) && + (identical(other.functionCall, functionCall) || + other.functionCall == functionCall)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, role, content, refusal, name, + const DeepCollectionEquality().hash(_toolCalls), functionCall); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionAssistantMessageImplCopyWith< + _$ChatCompletionAssistantMessageImpl> + get copyWith => __$$ChatCompletionAssistantMessageImplCopyWithImpl< + _$ChatCompletionAssistantMessageImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, + }) { + return assistant(role, content, refusal, name, toolCalls, functionCall); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? 
Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + }) { + return assistant?.call( + role, content, refusal, name, toolCalls, functionCall); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + required TResult orElse(), + }) { + if (assistant != null) { + return assistant(role, content, refusal, name, toolCalls, functionCall); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) { + return assistant(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, + }) { + return assistant?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, + required TResult orElse(), + }) { + if (assistant != null) { + return assistant(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionAssistantMessageImplToJson( + this, + ); + } +} + +abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { + const factory ChatCompletionAssistantMessage( + {final ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) final String? content, + @JsonKey(includeIfNull: false) final String? refusal, + @JsonKey(includeIfNull: false) final String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + final ChatCompletionMessageFunctionCall? 
functionCall}) = + _$ChatCompletionAssistantMessageImpl; + const ChatCompletionAssistantMessage._() : super._(); + + factory ChatCompletionAssistantMessage.fromJson(Map json) = + _$ChatCompletionAssistantMessageImpl.fromJson; + + /// The role of the messages author, in this case `assistant`. + @override + ChatCompletionMessageRole get role; + + /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + @override + @JsonKey(includeIfNull: false) + String? get content; + + /// The refusal message by the assistant. + @JsonKey(includeIfNull: false) + String? get refusal; + + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + @JsonKey(includeIfNull: false) + String? get name; + + /// No Description + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; + + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? get functionCall; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionAssistantMessageImplCopyWith< + _$ChatCompletionAssistantMessageImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ChatCompletionToolMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionToolMessageImplCopyWith( + _$ChatCompletionToolMessageImpl value, + $Res Function(_$ChatCompletionToolMessageImpl) then) = + __$$ChatCompletionToolMessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ChatCompletionMessageRole role, + String content, + @JsonKey(name: 'tool_call_id') String toolCallId}); +} + +/// @nodoc +class __$$ChatCompletionToolMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionToolMessageImpl> + implements _$$ChatCompletionToolMessageImplCopyWith<$Res> { + __$$ChatCompletionToolMessageImplCopyWithImpl( + _$ChatCompletionToolMessageImpl _value, + $Res Function(_$ChatCompletionToolMessageImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? role = null, + Object? content = null, + Object? toolCallId = null, + }) { + return _then(_$ChatCompletionToolMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + toolCallId: null == toolCallId + ? _value.toolCallId + : toolCallId // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { + const _$ChatCompletionToolMessageImpl( + {this.role = ChatCompletionMessageRole.tool, + required this.content, + @JsonKey(name: 'tool_call_id') required this.toolCallId}) + : super._(); + + factory _$ChatCompletionToolMessageImpl.fromJson(Map json) => + _$$ChatCompletionToolMessageImplFromJson(json); + + /// The role of the messages author, in this case `tool`. 
+ @override + @JsonKey() + final ChatCompletionMessageRole role; + + /// The contents of the tool message. + @override + final String content; + + /// Tool call that this message is responding to. + @override + @JsonKey(name: 'tool_call_id') + final String toolCallId; + + @override + String toString() { + return 'ChatCompletionMessage.tool(role: $role, content: $content, toolCallId: $toolCallId)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionToolMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.toolCallId, toolCallId) || + other.toolCallId == toolCallId)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, role, content, toolCallId); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> + get copyWith => __$$ChatCompletionToolMessageImplCopyWithImpl< + _$ChatCompletionToolMessageImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, + }) { + return tool(role, content, toolCallId); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? 
+ function, + }) { + return tool?.call(role, content, toolCallId); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + required TResult orElse(), + }) { + if (tool != null) { + return tool(role, content, toolCallId); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) { + return tool(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, + }) { + return tool?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, + required TResult orElse(), + }) { + if (tool != null) { + return tool(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionToolMessageImplToJson( + this, + ); + } +} + +abstract class ChatCompletionToolMessage extends ChatCompletionMessage { + const factory ChatCompletionToolMessage( + {final ChatCompletionMessageRole role, + required final String content, + @JsonKey(name: 'tool_call_id') required final String toolCallId}) = + _$ChatCompletionToolMessageImpl; + const ChatCompletionToolMessage._() : super._(); + + factory ChatCompletionToolMessage.fromJson(Map json) = + _$ChatCompletionToolMessageImpl.fromJson; + + /// The role of the messages author, in this case `tool`. + @override + ChatCompletionMessageRole get role; + + /// The contents of the tool message. + @override + String get content; + + /// Tool call that this message is responding to. + @JsonKey(name: 'tool_call_id') + String get toolCallId; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ChatCompletionFunctionMessageImplCopyWith<$Res> + implements $ChatCompletionMessageCopyWith<$Res> { + factory _$$ChatCompletionFunctionMessageImplCopyWith( + _$ChatCompletionFunctionMessageImpl value, + $Res Function(_$ChatCompletionFunctionMessageImpl) then) = + __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ChatCompletionMessageRole role, String? content, String name}); +} + +/// @nodoc +class __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageCopyWithImpl<$Res, + _$ChatCompletionFunctionMessageImpl> + implements _$$ChatCompletionFunctionMessageImplCopyWith<$Res> { + __$$ChatCompletionFunctionMessageImplCopyWithImpl( + _$ChatCompletionFunctionMessageImpl _value, + $Res Function(_$ChatCompletionFunctionMessageImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? role = null, + Object? content = freezed, + Object? name = null, + }) { + return _then(_$ChatCompletionFunctionMessageImpl( + role: null == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole, + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String?, + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionFunctionMessageImpl + extends ChatCompletionFunctionMessage { + const _$ChatCompletionFunctionMessageImpl( + {this.role = ChatCompletionMessageRole.function, + required this.content, + required this.name}) + : super._(); + + factory _$ChatCompletionFunctionMessageImpl.fromJson( + Map json) => + _$$ChatCompletionFunctionMessageImplFromJson(json); + + /// The role of the messages author, in this case `function`. + @override + @JsonKey() + final ChatCompletionMessageRole role; + + /// The contents of the function message. + @override + final String? content; + + /// The name of the function to call. + @override + final String name; + + @override + String toString() { + return 'ChatCompletionMessage.function(role: $role, content: $content, name: $name)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionFunctionMessageImpl && + (identical(other.role, role) || other.role == role) && + (identical(other.content, content) || other.content == content) && + (identical(other.name, name) || other.name == name)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, role, content, name); + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionFunctionMessageImplCopyWith< + _$ChatCompletionFunctionMessageImpl> + get copyWith => __$$ChatCompletionFunctionMessageImplCopyWithImpl< + _$ChatCompletionFunctionMessageImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name) + system, + required TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name) + user, + required TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall) + assistant, + required TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId) + tool, + required TResult Function( + ChatCompletionMessageRole role, String? content, String name) + function, + }) { + return function(role, content, name); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult? Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult? Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult? Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult? Function( + ChatCompletionMessageRole role, String? content, String name)? + function, + }) { + return function?.call(role, content, name); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(includeIfNull: false) String? name)? + system, + TResult Function( + ChatCompletionMessageRole role, + @_ChatCompletionUserMessageContentConverter() + ChatCompletionUserMessageContent content, + @JsonKey(includeIfNull: false) String? name)? + user, + TResult Function( + ChatCompletionMessageRole role, + @JsonKey(includeIfNull: false) String? content, + @JsonKey(includeIfNull: false) String? refusal, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall)? + assistant, + TResult Function(ChatCompletionMessageRole role, String content, + @JsonKey(name: 'tool_call_id') String toolCallId)? + tool, + TResult Function( + ChatCompletionMessageRole role, String? content, String name)? 
+ function, + required TResult orElse(), + }) { + if (function != null) { + return function(role, content, name); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionSystemMessage value) system, + required TResult Function(ChatCompletionUserMessage value) user, + required TResult Function(ChatCompletionAssistantMessage value) assistant, + required TResult Function(ChatCompletionToolMessage value) tool, + required TResult Function(ChatCompletionFunctionMessage value) function, + }) { + return function(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionSystemMessage value)? system, + TResult? Function(ChatCompletionUserMessage value)? user, + TResult? Function(ChatCompletionAssistantMessage value)? assistant, + TResult? Function(ChatCompletionToolMessage value)? tool, + TResult? Function(ChatCompletionFunctionMessage value)? function, + }) { + return function?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionSystemMessage value)? system, + TResult Function(ChatCompletionUserMessage value)? user, + TResult Function(ChatCompletionAssistantMessage value)? assistant, + TResult Function(ChatCompletionToolMessage value)? tool, + TResult Function(ChatCompletionFunctionMessage value)? function, + required TResult orElse(), + }) { + if (function != null) { + return function(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionFunctionMessageImplToJson( + this, + ); + } +} + +abstract class ChatCompletionFunctionMessage extends ChatCompletionMessage { + const factory ChatCompletionFunctionMessage( + {final ChatCompletionMessageRole role, + required final String? content, + required final String name}) = _$ChatCompletionFunctionMessageImpl; + const ChatCompletionFunctionMessage._() : super._(); + + factory ChatCompletionFunctionMessage.fromJson(Map json) = + _$ChatCompletionFunctionMessageImpl.fromJson; + + /// The role of the messages author, in this case `function`. + @override + ChatCompletionMessageRole get role; + + /// The contents of the function message. + @override + String? get content; + + /// The name of the function to call. + String get name; + + /// Create a copy of ChatCompletionMessage + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionFunctionMessageImplCopyWith< + _$ChatCompletionFunctionMessageImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionUserMessageContent _$ChatCompletionUserMessageContentFromJson( + Map json) { + switch (json['runtimeType']) { + case 'parts': + return ChatCompletionMessageContentParts.fromJson(json); + case 'string': + return ChatCompletionUserMessageContentString.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'runtimeType', + 'ChatCompletionUserMessageContent', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ChatCompletionUserMessageContent { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(List value) + parts, + required TResult Function(String value) string, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? parts, + TResult? Function(String value)? 
string, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? parts, + TResult Function(String value)? string, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentParts value) parts, + required TResult Function(ChatCompletionUserMessageContentString value) + string, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentParts value)? parts, + TResult? Function(ChatCompletionUserMessageContentString value)? string, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentParts value)? parts, + TResult Function(ChatCompletionUserMessageContentString value)? string, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionUserMessageContent to a JSON map. + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionUserMessageContentCopyWith<$Res> { + factory $ChatCompletionUserMessageContentCopyWith( + ChatCompletionUserMessageContent value, + $Res Function(ChatCompletionUserMessageContent) then) = + _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + ChatCompletionUserMessageContent>; +} + +/// @nodoc +class _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + $Val extends ChatCompletionUserMessageContent> + implements $ChatCompletionUserMessageContentCopyWith<$Res> { + _$ChatCompletionUserMessageContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. +} + +/// @nodoc +abstract class _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartsImplCopyWith( + _$ChatCompletionMessageContentPartsImpl value, + $Res Function(_$ChatCompletionMessageContentPartsImpl) then) = + __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res> + extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartsImpl> + implements _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartsImplCopyWithImpl( + _$ChatCompletionMessageContentPartsImpl _value, + $Res Function(_$ChatCompletionMessageContentPartsImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ChatCompletionMessageContentPartsImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionMessageContentPartsImpl + extends ChatCompletionMessageContentParts { + const _$ChatCompletionMessageContentPartsImpl( + final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 
'parts', + super._(); + + factory _$ChatCompletionMessageContentPartsImpl.fromJson( + Map json) => + _$$ChatCompletionMessageContentPartsImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ChatCompletionUserMessageContent.parts(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionMessageContentPartsImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionMessageContentPartsImplCopyWith< + _$ChatCompletionMessageContentPartsImpl> + get copyWith => __$$ChatCompletionMessageContentPartsImplCopyWithImpl< + _$ChatCompletionMessageContentPartsImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) + parts, + required TResult Function(String value) string, + }) { + return parts(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? parts, + TResult? Function(String value)? string, + }) { + return parts?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? parts, + TResult Function(String value)? string, + required TResult orElse(), + }) { + if (parts != null) { + return parts(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentParts value) parts, + required TResult Function(ChatCompletionUserMessageContentString value) + string, + }) { + return parts(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentParts value)? parts, + TResult? Function(ChatCompletionUserMessageContentString value)? string, + }) { + return parts?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentParts value)? parts, + TResult Function(ChatCompletionUserMessageContentString value)? string, + required TResult orElse(), + }) { + if (parts != null) { + return parts(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionMessageContentPartsImplToJson( + this, + ); + } +} + +abstract class ChatCompletionMessageContentParts + extends ChatCompletionUserMessageContent { + const factory ChatCompletionMessageContentParts( + final List value) = + _$ChatCompletionMessageContentPartsImpl; + const ChatCompletionMessageContentParts._() : super._(); + + factory ChatCompletionMessageContentParts.fromJson( + Map json) = + _$ChatCompletionMessageContentPartsImpl.fromJson; + + @override + List get value; + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageContentPartsImplCopyWith< + _$ChatCompletionMessageContentPartsImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { + factory _$$ChatCompletionUserMessageContentStringImplCopyWith( + _$ChatCompletionUserMessageContentStringImpl value, + $Res Function(_$ChatCompletionUserMessageContentStringImpl) then) = + __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res> + extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, + _$ChatCompletionUserMessageContentStringImpl> + implements _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { + __$$ChatCompletionUserMessageContentStringImplCopyWithImpl( + _$ChatCompletionUserMessageContentStringImpl _value, + $Res Function(_$ChatCompletionUserMessageContentStringImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$ChatCompletionUserMessageContentStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionUserMessageContentStringImpl + extends ChatCompletionUserMessageContentString { + const _$ChatCompletionUserMessageContentStringImpl(this.value, + {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$ChatCompletionUserMessageContentStringImpl.fromJson( + Map json) => + _$$ChatCompletionUserMessageContentStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ChatCompletionUserMessageContent.string(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionUserMessageContentStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, value); + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionUserMessageContentStringImplCopyWith< + _$ChatCompletionUserMessageContentStringImpl> + get copyWith => + __$$ChatCompletionUserMessageContentStringImplCopyWithImpl< + _$ChatCompletionUserMessageContentStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(List value) + parts, + required TResult Function(String value) string, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(List value)? parts, + TResult? Function(String value)? string, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(List value)? parts, + TResult Function(String value)? 
string, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentParts value) parts, + required TResult Function(ChatCompletionUserMessageContentString value) + string, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentParts value)? parts, + TResult? Function(ChatCompletionUserMessageContentString value)? string, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentParts value)? parts, + TResult Function(ChatCompletionUserMessageContentString value)? string, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionUserMessageContentStringImplToJson( + this, + ); + } +} + +abstract class ChatCompletionUserMessageContentString + extends ChatCompletionUserMessageContent { + const factory ChatCompletionUserMessageContentString(final String value) = + _$ChatCompletionUserMessageContentStringImpl; + const ChatCompletionUserMessageContentString._() : super._(); + + factory ChatCompletionUserMessageContentString.fromJson( + Map json) = + _$ChatCompletionUserMessageContentStringImpl.fromJson; + + @override + String get value; + + /// Create a copy of ChatCompletionUserMessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionUserMessageContentStringImplCopyWith< + _$ChatCompletionUserMessageContentStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionMessageContentPart _$ChatCompletionMessageContentPartFromJson( + Map json) { + switch (json['type']) { + case 'text': + return ChatCompletionMessageContentPartText.fromJson(json); + case 'image': + return ChatCompletionMessageContentPartImage.fromJson(json); + case 'refusal': + return ChatCompletionMessageContentPartRefusal.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'ChatCompletionMessageContentPart', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$ChatCompletionMessageContentPart { + /// The type of the content part, in this case `text`. + ChatCompletionMessageContentPartType get type => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageContentPartType type, String text)? 
+ text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this ChatCompletionMessageContentPart to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ChatCompletionMessageContentPartCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionMessageContentPartCopyWith<$Res> { + factory $ChatCompletionMessageContentPartCopyWith( + ChatCompletionMessageContentPart value, + $Res Function(ChatCompletionMessageContentPart) then) = + _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + ChatCompletionMessageContentPart>; + @useResult + $Res call({ChatCompletionMessageContentPartType type}); +} + +/// @nodoc +class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + $Val extends ChatCompletionMessageContentPart> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + _$ChatCompletionMessageContentPartCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartTextImplCopyWith( + _$ChatCompletionMessageContentPartTextImpl value, + $Res Function(_$ChatCompletionMessageContentPartTextImpl) then) = + __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({ChatCompletionMessageContentPartType type, String text}); +} + +/// @nodoc +class __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartTextImpl> + implements _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartTextImplCopyWithImpl( + _$ChatCompletionMessageContentPartTextImpl _value, + $Res Function(_$ChatCompletionMessageContentPartTextImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? text = null, + }) { + return _then(_$ChatCompletionMessageContentPartTextImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionMessageContentPartTextImpl + extends ChatCompletionMessageContentPartText { + const _$ChatCompletionMessageContentPartTextImpl( + {this.type = ChatCompletionMessageContentPartType.text, + required this.text}) + : super._(); + + factory _$ChatCompletionMessageContentPartTextImpl.fromJson( + Map json) => + _$$ChatCompletionMessageContentPartTextImplFromJson(json); + + /// The type of the content part, in this case `text`. + @override + @JsonKey() + final ChatCompletionMessageContentPartType type; + + /// The text content. + @override + final String text; + + @override + String toString() { + return 'ChatCompletionMessageContentPart.text(type: $type, text: $text)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionMessageContentPartTextImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.text, text) || other.text == text)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, text); + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionMessageContentPartTextImplCopyWith< + _$ChatCompletionMessageContentPartTextImpl> + get copyWith => __$$ChatCompletionMessageContentPartTextImplCopyWithImpl< + _$ChatCompletionMessageContentPartTextImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, + }) { + return text(type, this.text); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, + }) { + return text?.call(type, this.text); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, + required TResult orElse(), + }) { + if (text != null) { + return text(type, this.text); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, + }) { + return text(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + }) { + return text?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + required TResult orElse(), + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ChatCompletionMessageContentPartTextImplToJson( + this, + ); + } +} + +abstract class ChatCompletionMessageContentPartText + extends ChatCompletionMessageContentPart { + const factory ChatCompletionMessageContentPartText( + {final ChatCompletionMessageContentPartType type, + required final String text}) = _$ChatCompletionMessageContentPartTextImpl; + const ChatCompletionMessageContentPartText._() : super._(); + + factory ChatCompletionMessageContentPartText.fromJson( + Map json) = + _$ChatCompletionMessageContentPartTextImpl.fromJson; + + /// The type of the content part, in this case `text`. + @override + ChatCompletionMessageContentPartType get type; + + /// The text content. 
+ String get text; + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageContentPartTextImplCopyWith< + _$ChatCompletionMessageContentPartTextImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartImageImplCopyWith( + _$ChatCompletionMessageContentPartImageImpl value, + $Res Function(_$ChatCompletionMessageContentPartImageImpl) then) = + __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl}); + + $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl; +} + +/// @nodoc +class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartImageImpl> + implements _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartImageImplCopyWithImpl( + _$ChatCompletionMessageContentPartImageImpl _value, + $Res Function(_$ChatCompletionMessageContentPartImageImpl) _then) + : super(_value, _then); + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? imageUrl = null, + }) { + return _then(_$ChatCompletionMessageContentPartImageImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, + imageUrl: null == imageUrl + ? _value.imageUrl + : imageUrl // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageImageUrl, + )); + } + + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl { + return $ChatCompletionMessageImageUrlCopyWith<$Res>(_value.imageUrl, + (value) { + return _then(_value.copyWith(imageUrl: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionMessageContentPartImageImpl + extends ChatCompletionMessageContentPartImage { + const _$ChatCompletionMessageContentPartImageImpl( + {this.type = ChatCompletionMessageContentPartType.imageUrl, + @JsonKey(name: 'image_url') required this.imageUrl}) + : super._(); - factory _$BatchErrorsImpl.fromJson(Map json) => - _$$BatchErrorsImplFromJson(json); + factory _$ChatCompletionMessageContentPartImageImpl.fromJson( + Map json) => + _$$ChatCompletionMessageContentPartImageImplFromJson(json); - /// The object type, which is always `list`. + /// The type of the content part, in this case `image_url`. @override - @JsonKey(includeIfNull: false) - final String? object; - - /// No Description - final List? _data; + @JsonKey() + final ChatCompletionMessageContentPartType type; - /// No Description + /// The URL of the image. @override - @JsonKey(includeIfNull: false) - List? 
get data { - final value = _data; - if (value == null) return null; - if (_data is EqualUnmodifiableListView) return _data; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + @JsonKey(name: 'image_url') + final ChatCompletionMessageImageUrl imageUrl; @override String toString() { - return 'BatchErrors(object: $object, data: $data)'; + return 'ChatCompletionMessageContentPart.image(type: $type, imageUrl: $imageUrl)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$BatchErrorsImpl && - (identical(other.object, object) || other.object == object) && - const DeepCollectionEquality().equals(other._data, _data)); + other is _$ChatCompletionMessageContentPartImageImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.imageUrl, imageUrl) || + other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, object, const DeepCollectionEquality().hash(_data)); + int get hashCode => Object.hash(runtimeType, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => - __$$BatchErrorsImplCopyWithImpl<_$BatchErrorsImpl>(this, _$identity); + _$$ChatCompletionMessageContentPartImageImplCopyWith< + _$ChatCompletionMessageContentPartImageImpl> + get copyWith => __$$ChatCompletionMessageContentPartImageImplCopyWithImpl< + _$ChatCompletionMessageContentPartImageImpl>(this, _$identity); @override - Map toJson() { - return _$$BatchErrorsImplToJson( - this, - ); + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, + }) { + return image(type, imageUrl); } -} -abstract class _BatchErrors extends BatchErrors { - const factory _BatchErrors( - {@JsonKey(includeIfNull: false) final String? object, - @JsonKey(includeIfNull: false) - final List? data}) = _$BatchErrorsImpl; - const _BatchErrors._() : super._(); + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, + }) { + return image?.call(type, imageUrl); + } - factory _BatchErrors.fromJson(Map json) = - _$BatchErrorsImpl.fromJson; + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? 
+ refusal, + required TResult orElse(), + }) { + if (image != null) { + return image(type, imageUrl); + } + return orElse(); + } @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, + }) { + return image(this); + } - /// The object type, which is always `list`. - @JsonKey(includeIfNull: false) - String? get object; @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + }) { + return image?.call(this); + } - /// No Description - @JsonKey(includeIfNull: false) - List? get data; @override - @JsonKey(ignore: true) - _$$BatchErrorsImplCopyWith<_$BatchErrorsImpl> get copyWith => - throw _privateConstructorUsedError; -} + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + required TResult orElse(), + }) { + if (image != null) { + return image(this); + } + return orElse(); + } -BatchRequestCounts _$BatchRequestCountsFromJson(Map json) { - return _BatchRequestCounts.fromJson(json); + @override + Map toJson() { + return _$$ChatCompletionMessageContentPartImageImplToJson( + this, + ); + } } -/// @nodoc -mixin _$BatchRequestCounts { - /// Total number of requests in the batch. - int get total => throw _privateConstructorUsedError; - - /// Number of requests that have been completed successfully. - int get completed => throw _privateConstructorUsedError; - - /// Number of requests that have failed. - int get failed => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BatchRequestCountsCopyWith get copyWith => - throw _privateConstructorUsedError; -} +abstract class ChatCompletionMessageContentPartImage + extends ChatCompletionMessageContentPart { + const factory ChatCompletionMessageContentPartImage( + {final ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') + required final ChatCompletionMessageImageUrl imageUrl}) = + _$ChatCompletionMessageContentPartImageImpl; + const ChatCompletionMessageContentPartImage._() : super._(); -/// @nodoc -abstract class $BatchRequestCountsCopyWith<$Res> { - factory $BatchRequestCountsCopyWith( - BatchRequestCounts value, $Res Function(BatchRequestCounts) then) = - _$BatchRequestCountsCopyWithImpl<$Res, BatchRequestCounts>; - @useResult - $Res call({int total, int completed, int failed}); -} + factory ChatCompletionMessageContentPartImage.fromJson( + Map json) = + _$ChatCompletionMessageContentPartImageImpl.fromJson; -/// @nodoc -class _$BatchRequestCountsCopyWithImpl<$Res, $Val extends BatchRequestCounts> - implements $BatchRequestCountsCopyWith<$Res> { - _$BatchRequestCountsCopyWithImpl(this._value, this._then); + /// The type of the content part, in this case `image_url`. + @override + ChatCompletionMessageContentPartType get type; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// The URL of the image. 
+ @JsonKey(name: 'image_url') + ChatCompletionMessageImageUrl get imageUrl; - @pragma('vm:prefer-inline') + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @override - $Res call({ - Object? total = null, - Object? completed = null, - Object? failed = null, - }) { - return _then(_value.copyWith( - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, - ) as $Val); - } + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageContentPartImageImplCopyWith< + _$ChatCompletionMessageContentPartImageImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$BatchRequestCountsImplCopyWith<$Res> - implements $BatchRequestCountsCopyWith<$Res> { - factory _$$BatchRequestCountsImplCopyWith(_$BatchRequestCountsImpl value, - $Res Function(_$BatchRequestCountsImpl) then) = - __$$BatchRequestCountsImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> + implements $ChatCompletionMessageContentPartCopyWith<$Res> { + factory _$$ChatCompletionMessageContentPartRefusalImplCopyWith( + _$ChatCompletionMessageContentPartRefusalImpl value, + $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) then) = + __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res>; @override @useResult - $Res call({int total, int completed, int failed}); + $Res call({ChatCompletionMessageContentPartType type, String refusal}); } /// @nodoc -class __$$BatchRequestCountsImplCopyWithImpl<$Res> - extends _$BatchRequestCountsCopyWithImpl<$Res, _$BatchRequestCountsImpl> - implements _$$BatchRequestCountsImplCopyWith<$Res> { - __$$BatchRequestCountsImplCopyWithImpl(_$BatchRequestCountsImpl _value, - $Res Function(_$BatchRequestCountsImpl) _then) +class __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, + _$ChatCompletionMessageContentPartRefusalImpl> + implements _$$ChatCompletionMessageContentPartRefusalImplCopyWith<$Res> { + __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl( + _$ChatCompletionMessageContentPartRefusalImpl _value, + $Res Function(_$ChatCompletionMessageContentPartRefusalImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? total = null, - Object? completed = null, - Object? failed = null, + Object? type = null, + Object? refusal = null, }) { - return _then(_$BatchRequestCountsImpl( - total: null == total - ? _value.total - : total // ignore: cast_nullable_to_non_nullable - as int, - completed: null == completed - ? _value.completed - : completed // ignore: cast_nullable_to_non_nullable - as int, - failed: null == failed - ? _value.failed - : failed // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$ChatCompletionMessageContentPartRefusalImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageContentPartType, + refusal: null == refusal + ? 
_value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$BatchRequestCountsImpl extends _BatchRequestCounts { - const _$BatchRequestCountsImpl( - {required this.total, required this.completed, required this.failed}) +class _$ChatCompletionMessageContentPartRefusalImpl + extends ChatCompletionMessageContentPartRefusal { + const _$ChatCompletionMessageContentPartRefusalImpl( + {this.type = ChatCompletionMessageContentPartType.refusal, + required this.refusal}) : super._(); - factory _$BatchRequestCountsImpl.fromJson(Map json) => - _$$BatchRequestCountsImplFromJson(json); - - /// Total number of requests in the batch. - @override - final int total; + factory _$ChatCompletionMessageContentPartRefusalImpl.fromJson( + Map json) => + _$$ChatCompletionMessageContentPartRefusalImplFromJson(json); - /// Number of requests that have been completed successfully. + /// The type of the content part, in this case `refusal`. @override - final int completed; + @JsonKey() + final ChatCompletionMessageContentPartType type; - /// Number of requests that have failed. + /// The refusal message generated by the model. @override - final int failed; + final String refusal; @override String toString() { - return 'BatchRequestCounts(total: $total, completed: $completed, failed: $failed)'; + return 'ChatCompletionMessageContentPart.refusal(type: $type, refusal: $refusal)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$BatchRequestCountsImpl && - (identical(other.total, total) || other.total == total) && - (identical(other.completed, completed) || - other.completed == completed) && - (identical(other.failed, failed) || other.failed == failed)); + other is _$ChatCompletionMessageContentPartRefusalImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.refusal, refusal) || other.refusal == refusal)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, total, completed, failed); + int get hashCode => Object.hash(runtimeType, type, refusal); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => - __$$BatchRequestCountsImplCopyWithImpl<_$BatchRequestCountsImpl>( - this, _$identity); + _$$ChatCompletionMessageContentPartRefusalImplCopyWith< + _$ChatCompletionMessageContentPartRefusalImpl> + get copyWith => + __$$ChatCompletionMessageContentPartRefusalImplCopyWithImpl< + _$ChatCompletionMessageContentPartRefusalImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + ChatCompletionMessageContentPartType type, String text) + text, + required TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) + image, + required TResult Function( + ChatCompletionMessageContentPartType type, String refusal) + refusal, + }) { + return refusal(type, this.refusal); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult? 
Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult? Function( + ChatCompletionMessageContentPartType type, String refusal)? + refusal, + }) { + return refusal?.call(type, this.refusal); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ChatCompletionMessageContentPartType type, String text)? + text, + TResult Function(ChatCompletionMessageContentPartType type, + @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? + image, + TResult Function(ChatCompletionMessageContentPartType type, String refusal)? + refusal, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(type, this.refusal); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(ChatCompletionMessageContentPartText value) text, + required TResult Function(ChatCompletionMessageContentPartImage value) + image, + required TResult Function(ChatCompletionMessageContentPartRefusal value) + refusal, + }) { + return refusal(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ChatCompletionMessageContentPartText value)? text, + TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(ChatCompletionMessageContentPartRefusal value)? refusal, + }) { + return refusal?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ChatCompletionMessageContentPartText value)? text, + TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(ChatCompletionMessageContentPartRefusal value)? refusal, + required TResult orElse(), + }) { + if (refusal != null) { + return refusal(this); + } + return orElse(); + } @override Map toJson() { - return _$$BatchRequestCountsImplToJson( + return _$$ChatCompletionMessageContentPartRefusalImplToJson( this, ); } } -abstract class _BatchRequestCounts extends BatchRequestCounts { - const factory _BatchRequestCounts( - {required final int total, - required final int completed, - required final int failed}) = _$BatchRequestCountsImpl; - const _BatchRequestCounts._() : super._(); - - factory _BatchRequestCounts.fromJson(Map json) = - _$BatchRequestCountsImpl.fromJson; +abstract class ChatCompletionMessageContentPartRefusal + extends ChatCompletionMessageContentPart { + const factory ChatCompletionMessageContentPartRefusal( + {final ChatCompletionMessageContentPartType type, + required final String refusal}) = + _$ChatCompletionMessageContentPartRefusalImpl; + const ChatCompletionMessageContentPartRefusal._() : super._(); - @override + factory ChatCompletionMessageContentPartRefusal.fromJson( + Map json) = + _$ChatCompletionMessageContentPartRefusalImpl.fromJson; - /// Total number of requests in the batch. - int get total; + /// The type of the content part, in this case `refusal`. @override + ChatCompletionMessageContentPartType get type; - /// Number of requests that have been completed successfully. - int get completed; - @override + /// The refusal message generated by the model. + String get refusal; - /// Number of requests that have failed. - int get failed; + /// Create a copy of ChatCompletionMessageContentPart + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$BatchRequestCountsImplCopyWith<_$BatchRequestCountsImpl> get copyWith => - throw _privateConstructorUsedError; + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageContentPartRefusalImplCopyWith< + _$ChatCompletionMessageContentPartRefusalImpl> + get copyWith => throw _privateConstructorUsedError; } -BatchErrorsDataInner _$BatchErrorsDataInnerFromJson(Map json) { - return _BatchErrorsDataInner.fromJson(json); +ChatCompletionMessageImageUrl _$ChatCompletionMessageImageUrlFromJson( + Map json) { + return _ChatCompletionMessageImageUrl.fromJson(json); } /// @nodoc -mixin _$BatchErrorsDataInner { - /// An error code identifying the error type. - @JsonKey(includeIfNull: false) - String? get code => throw _privateConstructorUsedError; - - /// A human-readable message providing more details about the error. - @JsonKey(includeIfNull: false) - String? get message => throw _privateConstructorUsedError; - - /// The name of the parameter that caused the error, if applicable. - @JsonKey(includeIfNull: false) - String? get param => throw _privateConstructorUsedError; +mixin _$ChatCompletionMessageImageUrl { + /// Either a URL of the image or the base64 encoded image data. + String get url => throw _privateConstructorUsedError; - /// The line number of the input file where the error occurred, if applicable. - @JsonKey(includeIfNull: false) - int? get line => throw _privateConstructorUsedError; + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + ChatCompletionMessageImageDetail get detail => + throw _privateConstructorUsedError; + /// Serializes this ChatCompletionMessageImageUrl to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $BatchErrorsDataInnerCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ChatCompletionMessageImageUrlCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $BatchErrorsDataInnerCopyWith<$Res> { - factory $BatchErrorsDataInnerCopyWith(BatchErrorsDataInner value, - $Res Function(BatchErrorsDataInner) then) = - _$BatchErrorsDataInnerCopyWithImpl<$Res, BatchErrorsDataInner>; +abstract class $ChatCompletionMessageImageUrlCopyWith<$Res> { + factory $ChatCompletionMessageImageUrlCopyWith( + ChatCompletionMessageImageUrl value, + $Res Function(ChatCompletionMessageImageUrl) then) = + _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, + ChatCompletionMessageImageUrl>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? code, - @JsonKey(includeIfNull: false) String? message, - @JsonKey(includeIfNull: false) String? param, - @JsonKey(includeIfNull: false) int? 
line}); + $Res call({String url, ChatCompletionMessageImageDetail detail}); } /// @nodoc -class _$BatchErrorsDataInnerCopyWithImpl<$Res, - $Val extends BatchErrorsDataInner> - implements $BatchErrorsDataInnerCopyWith<$Res> { - _$BatchErrorsDataInnerCopyWithImpl(this._value, this._then); +class _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, + $Val extends ChatCompletionMessageImageUrl> + implements $ChatCompletionMessageImageUrlCopyWith<$Res> { + _$ChatCompletionMessageImageUrlCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? code = freezed, - Object? message = freezed, - Object? param = freezed, - Object? line = freezed, + Object? url = null, + Object? detail = null, }) { return _then(_value.copyWith( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: freezed == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String?, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - line: freezed == line - ? _value.line - : line // ignore: cast_nullable_to_non_nullable - as int?, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + detail: null == detail + ? _value.detail + : detail // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageImageDetail, ) as $Val); } } /// @nodoc -abstract class _$$BatchErrorsDataInnerImplCopyWith<$Res> - implements $BatchErrorsDataInnerCopyWith<$Res> { - factory _$$BatchErrorsDataInnerImplCopyWith(_$BatchErrorsDataInnerImpl value, - $Res Function(_$BatchErrorsDataInnerImpl) then) = - __$$BatchErrorsDataInnerImplCopyWithImpl<$Res>; +abstract class _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> + implements $ChatCompletionMessageImageUrlCopyWith<$Res> { + factory _$$ChatCompletionMessageImageUrlImplCopyWith( + _$ChatCompletionMessageImageUrlImpl value, + $Res Function(_$ChatCompletionMessageImageUrlImpl) then) = + __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? code, - @JsonKey(includeIfNull: false) String? message, - @JsonKey(includeIfNull: false) String? param, - @JsonKey(includeIfNull: false) int? line}); + $Res call({String url, ChatCompletionMessageImageDetail detail}); } /// @nodoc -class __$$BatchErrorsDataInnerImplCopyWithImpl<$Res> - extends _$BatchErrorsDataInnerCopyWithImpl<$Res, _$BatchErrorsDataInnerImpl> - implements _$$BatchErrorsDataInnerImplCopyWith<$Res> { - __$$BatchErrorsDataInnerImplCopyWithImpl(_$BatchErrorsDataInnerImpl _value, - $Res Function(_$BatchErrorsDataInnerImpl) _then) +class __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res> + extends _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, + _$ChatCompletionMessageImageUrlImpl> + implements _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> { + __$$ChatCompletionMessageImageUrlImplCopyWithImpl( + _$ChatCompletionMessageImageUrlImpl _value, + $Res Function(_$ChatCompletionMessageImageUrlImpl) _then) : super(_value, _then); + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? 
code = freezed, - Object? message = freezed, - Object? param = freezed, - Object? line = freezed, + Object? url = null, + Object? detail = null, }) { - return _then(_$BatchErrorsDataInnerImpl( - code: freezed == code - ? _value.code - : code // ignore: cast_nullable_to_non_nullable - as String?, - message: freezed == message - ? _value.message - : message // ignore: cast_nullable_to_non_nullable - as String?, - param: freezed == param - ? _value.param - : param // ignore: cast_nullable_to_non_nullable - as String?, - line: freezed == line - ? _value.line - : line // ignore: cast_nullable_to_non_nullable - as int?, + return _then(_$ChatCompletionMessageImageUrlImpl( + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + detail: null == detail + ? _value.detail + : detail // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageImageDetail, )); } } /// @nodoc @JsonSerializable() -class _$BatchErrorsDataInnerImpl extends _BatchErrorsDataInner { - const _$BatchErrorsDataInnerImpl( - {@JsonKey(includeIfNull: false) this.code, - @JsonKey(includeIfNull: false) this.message, - @JsonKey(includeIfNull: false) this.param, - @JsonKey(includeIfNull: false) this.line}) +class _$ChatCompletionMessageImageUrlImpl + extends _ChatCompletionMessageImageUrl { + const _$ChatCompletionMessageImageUrlImpl( + {required this.url, this.detail = ChatCompletionMessageImageDetail.auto}) : super._(); - factory _$BatchErrorsDataInnerImpl.fromJson(Map json) => - _$$BatchErrorsDataInnerImplFromJson(json); - - /// An error code identifying the error type. - @override - @JsonKey(includeIfNull: false) - final String? code; - - /// A human-readable message providing more details about the error. - @override - @JsonKey(includeIfNull: false) - final String? message; + factory _$ChatCompletionMessageImageUrlImpl.fromJson( + Map json) => + _$$ChatCompletionMessageImageUrlImplFromJson(json); - /// The name of the parameter that caused the error, if applicable. + /// Either a URL of the image or the base64 encoded image data. @override - @JsonKey(includeIfNull: false) - final String? param; + final String url; - /// The line number of the input file where the error occurred, if applicable. + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). @override - @JsonKey(includeIfNull: false) - final int? 
line; + @JsonKey() + final ChatCompletionMessageImageDetail detail; @override String toString() { - return 'BatchErrorsDataInner(code: $code, message: $message, param: $param, line: $line)'; + return 'ChatCompletionMessageImageUrl(url: $url, detail: $detail)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$BatchErrorsDataInnerImpl && - (identical(other.code, code) || other.code == code) && - (identical(other.message, message) || other.message == message) && - (identical(other.param, param) || other.param == param) && - (identical(other.line, line) || other.line == line)); + other is _$ChatCompletionMessageImageUrlImpl && + (identical(other.url, url) || other.url == url) && + (identical(other.detail, detail) || other.detail == detail)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, code, message, param, line); + int get hashCode => Object.hash(runtimeType, url, detail); - @JsonKey(ignore: true) + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> - get copyWith => - __$$BatchErrorsDataInnerImplCopyWithImpl<_$BatchErrorsDataInnerImpl>( - this, _$identity); + _$$ChatCompletionMessageImageUrlImplCopyWith< + _$ChatCompletionMessageImageUrlImpl> + get copyWith => __$$ChatCompletionMessageImageUrlImplCopyWithImpl< + _$ChatCompletionMessageImageUrlImpl>(this, _$identity); @override Map toJson() { - return _$$BatchErrorsDataInnerImplToJson( + return _$$ChatCompletionMessageImageUrlImplToJson( this, ); } } -abstract class _BatchErrorsDataInner extends BatchErrorsDataInner { - const factory _BatchErrorsDataInner( - {@JsonKey(includeIfNull: false) final String? code, - @JsonKey(includeIfNull: false) final String? message, - @JsonKey(includeIfNull: false) final String? param, - @JsonKey(includeIfNull: false) final int? line}) = - _$BatchErrorsDataInnerImpl; - const _BatchErrorsDataInner._() : super._(); - - factory _BatchErrorsDataInner.fromJson(Map json) = - _$BatchErrorsDataInnerImpl.fromJson; - - @override +abstract class _ChatCompletionMessageImageUrl + extends ChatCompletionMessageImageUrl { + const factory _ChatCompletionMessageImageUrl( + {required final String url, + final ChatCompletionMessageImageDetail detail}) = + _$ChatCompletionMessageImageUrlImpl; + const _ChatCompletionMessageImageUrl._() : super._(); - /// An error code identifying the error type. - @JsonKey(includeIfNull: false) - String? get code; - @override + factory _ChatCompletionMessageImageUrl.fromJson(Map json) = + _$ChatCompletionMessageImageUrlImpl.fromJson; - /// A human-readable message providing more details about the error. - @JsonKey(includeIfNull: false) - String? get message; + /// Either a URL of the image or the base64 encoded image data. @override + String get url; - /// The name of the parameter that caused the error, if applicable. - @JsonKey(includeIfNull: false) - String? get param; + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). @override + ChatCompletionMessageImageDetail get detail; - /// The line number of the input file where the error occurred, if applicable. 
- @JsonKey(includeIfNull: false) - int? get line; + /// Create a copy of ChatCompletionMessageImageUrl + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$BatchErrorsDataInnerImplCopyWith<_$BatchErrorsDataInnerImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ChatCompletionMessageImageUrlImplCopyWith< + _$ChatCompletionMessageImageUrlImpl> get copyWith => throw _privateConstructorUsedError; } -ListBatchesResponse _$ListBatchesResponseFromJson(Map json) { - return _ListBatchesResponse.fromJson(json); +ResponseFormat _$ResponseFormatFromJson(Map json) { + switch (json['type']) { + case 'text': + return ResponseFormatText.fromJson(json); + case 'json_object': + return ResponseFormatJsonObject.fromJson(json); + case 'json_schema': + return ResponseFormatJsonSchema.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'ResponseFormat', + 'Invalid union type "${json['type']}"!'); + } } /// @nodoc -mixin _$ListBatchesResponse { - /// No Description - List get data => throw _privateConstructorUsedError; - - /// The ID of the first batch in the list. - @JsonKey(name: 'first_id', includeIfNull: false) - String? get firstId => throw _privateConstructorUsedError; - - /// The ID of the last batch in the list. - @JsonKey(name: 'last_id', includeIfNull: false) - String? get lastId => throw _privateConstructorUsedError; - - /// Whether there are more batches available. - @JsonKey(name: 'has_more') - bool get hasMore => throw _privateConstructorUsedError; - - /// The object type, which is always `list`. - ListBatchesResponseObject get object => throw _privateConstructorUsedError; +mixin _$ResponseFormat { + /// The type of response format being defined. + ResponseFormatType get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? 
jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + /// Serializes this ResponseFormat to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ListBatchesResponseCopyWith get copyWith => + + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ResponseFormatCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ListBatchesResponseCopyWith<$Res> { - factory $ListBatchesResponseCopyWith( - ListBatchesResponse value, $Res Function(ListBatchesResponse) then) = - _$ListBatchesResponseCopyWithImpl<$Res, ListBatchesResponse>; +abstract class $ResponseFormatCopyWith<$Res> { + factory $ResponseFormatCopyWith( + ResponseFormat value, $Res Function(ResponseFormat) then) = + _$ResponseFormatCopyWithImpl<$Res, ResponseFormat>; @useResult - $Res call( - {List data, - @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, - @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, - @JsonKey(name: 'has_more') bool hasMore, - ListBatchesResponseObject object}); + $Res call({ResponseFormatType type}); } /// @nodoc -class _$ListBatchesResponseCopyWithImpl<$Res, $Val extends ListBatchesResponse> - implements $ListBatchesResponseCopyWith<$Res> { - _$ListBatchesResponseCopyWithImpl(this._value, this._then); +class _$ResponseFormatCopyWithImpl<$Res, $Val extends ResponseFormat> + implements $ResponseFormatCopyWith<$Res> { + _$ResponseFormatCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, - Object? hasMore = null, - Object? object = null, + Object? type = null, }) { return _then(_value.copyWith( - data: null == data - ? _value.data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable - as String?, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as ListBatchesResponseObject, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, ) as $Val); } } /// @nodoc -abstract class _$$ListBatchesResponseImplCopyWith<$Res> - implements $ListBatchesResponseCopyWith<$Res> { - factory _$$ListBatchesResponseImplCopyWith(_$ListBatchesResponseImpl value, - $Res Function(_$ListBatchesResponseImpl) then) = - __$$ListBatchesResponseImplCopyWithImpl<$Res>; +abstract class _$$ResponseFormatTextImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatTextImplCopyWith(_$ResponseFormatTextImpl value, + $Res Function(_$ResponseFormatTextImpl) then) = + __$$ResponseFormatTextImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {List data, - @JsonKey(name: 'first_id', includeIfNull: false) String? 
firstId, - @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, - @JsonKey(name: 'has_more') bool hasMore, - ListBatchesResponseObject object}); + $Res call({ResponseFormatType type}); } /// @nodoc -class __$$ListBatchesResponseImplCopyWithImpl<$Res> - extends _$ListBatchesResponseCopyWithImpl<$Res, _$ListBatchesResponseImpl> - implements _$$ListBatchesResponseImplCopyWith<$Res> { - __$$ListBatchesResponseImplCopyWithImpl(_$ListBatchesResponseImpl _value, - $Res Function(_$ListBatchesResponseImpl) _then) +class __$$ResponseFormatTextImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatTextImpl> + implements _$$ResponseFormatTextImplCopyWith<$Res> { + __$$ResponseFormatTextImplCopyWithImpl(_$ResponseFormatTextImpl _value, + $Res Function(_$ResponseFormatTextImpl) _then) : super(_value, _then); + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? data = null, - Object? firstId = freezed, - Object? lastId = freezed, - Object? hasMore = null, - Object? object = null, + Object? type = null, }) { - return _then(_$ListBatchesResponseImpl( - data: null == data - ? _value._data - : data // ignore: cast_nullable_to_non_nullable - as List, - firstId: freezed == firstId - ? _value.firstId - : firstId // ignore: cast_nullable_to_non_nullable - as String?, - lastId: freezed == lastId - ? _value.lastId - : lastId // ignore: cast_nullable_to_non_nullable - as String?, - hasMore: null == hasMore - ? _value.hasMore - : hasMore // ignore: cast_nullable_to_non_nullable - as bool, - object: null == object - ? _value.object - : object // ignore: cast_nullable_to_non_nullable - as ListBatchesResponseObject, + return _then(_$ResponseFormatTextImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, )); } } /// @nodoc @JsonSerializable() -class _$ListBatchesResponseImpl extends _ListBatchesResponse { - const _$ListBatchesResponseImpl( - {required final List data, - @JsonKey(name: 'first_id', includeIfNull: false) this.firstId, - @JsonKey(name: 'last_id', includeIfNull: false) this.lastId, - @JsonKey(name: 'has_more') required this.hasMore, - required this.object}) - : _data = data, - super._(); - - factory _$ListBatchesResponseImpl.fromJson(Map json) => - _$$ListBatchesResponseImplFromJson(json); - - /// No Description - final List _data; - - /// No Description - @override - List get data { - if (_data is EqualUnmodifiableListView) return _data; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_data); - } - - /// The ID of the first batch in the list. - @override - @JsonKey(name: 'first_id', includeIfNull: false) - final String? firstId; - - /// The ID of the last batch in the list. - @override - @JsonKey(name: 'last_id', includeIfNull: false) - final String? lastId; +class _$ResponseFormatTextImpl extends ResponseFormatText { + const _$ResponseFormatTextImpl({this.type = ResponseFormatType.text}) + : super._(); - /// Whether there are more batches available. - @override - @JsonKey(name: 'has_more') - final bool hasMore; + factory _$ResponseFormatTextImpl.fromJson(Map json) => + _$$ResponseFormatTextImplFromJson(json); - /// The object type, which is always `list`. + /// The type of response format being defined. 
@override - final ListBatchesResponseObject object; + @JsonKey() + final ResponseFormatType type; @override String toString() { - return 'ListBatchesResponse(data: $data, firstId: $firstId, lastId: $lastId, hasMore: $hasMore, object: $object)'; + return 'ResponseFormat.text(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ListBatchesResponseImpl && - const DeepCollectionEquality().equals(other._data, _data) && - (identical(other.firstId, firstId) || other.firstId == firstId) && - (identical(other.lastId, lastId) || other.lastId == lastId) && - (identical(other.hasMore, hasMore) || other.hasMore == hasMore) && - (identical(other.object, object) || other.object == object)); + other is _$ResponseFormatTextImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, - const DeepCollectionEquality().hash(_data), - firstId, - lastId, - hasMore, - object); + int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => - __$$ListBatchesResponseImplCopyWithImpl<_$ListBatchesResponseImpl>( + _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => + __$$ResponseFormatTextImplCopyWithImpl<_$ResponseFormatTextImpl>( this, _$identity); @override - Map toJson() { - return _$$ListBatchesResponseImplToJson( - this, - ); + @optionalTypeArgs + TResult when({ + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, + }) { + return text(type); } -} - -abstract class _ListBatchesResponse extends ListBatchesResponse { - const factory _ListBatchesResponse( - {required final List data, - @JsonKey(name: 'first_id', includeIfNull: false) final String? firstId, - @JsonKey(name: 'last_id', includeIfNull: false) final String? lastId, - @JsonKey(name: 'has_more') required final bool hasMore, - required final ListBatchesResponseObject - object}) = _$ListBatchesResponseImpl; - const _ListBatchesResponse._() : super._(); - factory _ListBatchesResponse.fromJson(Map json) = - _$ListBatchesResponseImpl.fromJson; - - @override - - /// No Description - List get data; - @override - - /// The ID of the first batch in the list. - @JsonKey(name: 'first_id', includeIfNull: false) - String? get firstId; - @override - - /// The ID of the last batch in the list. - @JsonKey(name: 'last_id', includeIfNull: false) - String? get lastId; - @override - - /// Whether there are more batches available. - @JsonKey(name: 'has_more') - bool get hasMore; - @override - - /// The object type, which is always `list`. 
- ListBatchesResponseObject get object; @override - @JsonKey(ignore: true) - _$$ListBatchesResponseImplCopyWith<_$ListBatchesResponseImpl> get copyWith => - throw _privateConstructorUsedError; -} - -ChatCompletionMessage _$ChatCompletionMessageFromJson( - Map json) { - switch (json['role']) { - case 'system': - return ChatCompletionSystemMessage.fromJson(json); - case 'user': - return ChatCompletionUserMessage.fromJson(json); - case 'assistant': - return ChatCompletionAssistantMessage.fromJson(json); - case 'tool': - return ChatCompletionToolMessage.fromJson(json); - case 'function': - return ChatCompletionFunctionMessage.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'role', 'ChatCompletionMessage', - 'Invalid union type "${json['role']}"!'); - } -} - -/// @nodoc -mixin _$ChatCompletionMessage { - /// The role of the messages author, in this case `system`. - ChatCompletionMessageRole get role => throw _privateConstructorUsedError; - - /// The contents of the system message. - Object? get content => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, - }) => - throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, - }) => - throw _privateConstructorUsedError; + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, + }) { + return text?.call(type); + } + + @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? 
- system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (text != null) { + return text(type); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, - }) => - throw _privateConstructorUsedError; + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, + }) { + return text(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, - }) => - throw _privateConstructorUsedError; + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, + }) { + return text?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? 
jsonSchema, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ChatCompletionMessageCopyWith get copyWith => - throw _privateConstructorUsedError; + }) { + if (text != null) { + return text(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ResponseFormatTextImplToJson( + this, + ); + } } -/// @nodoc -abstract class $ChatCompletionMessageCopyWith<$Res> { - factory $ChatCompletionMessageCopyWith(ChatCompletionMessage value, - $Res Function(ChatCompletionMessage) then) = - _$ChatCompletionMessageCopyWithImpl<$Res, ChatCompletionMessage>; - @useResult - $Res call({ChatCompletionMessageRole role}); -} +abstract class ResponseFormatText extends ResponseFormat { + const factory ResponseFormatText({final ResponseFormatType type}) = + _$ResponseFormatTextImpl; + const ResponseFormatText._() : super._(); -/// @nodoc -class _$ChatCompletionMessageCopyWithImpl<$Res, - $Val extends ChatCompletionMessage> - implements $ChatCompletionMessageCopyWith<$Res> { - _$ChatCompletionMessageCopyWithImpl(this._value, this._then); + factory ResponseFormatText.fromJson(Map json) = + _$ResponseFormatTextImpl.fromJson; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// The type of response format being defined. + @override + ResponseFormatType get type; - @pragma('vm:prefer-inline') + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @override - $Res call({ - Object? role = null, - }) { - return _then(_value.copyWith( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - ) as $Val); - } + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatTextImplCopyWith<_$ResponseFormatTextImpl> get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionSystemMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionSystemMessageImplCopyWith( - _$ChatCompletionSystemMessageImpl value, - $Res Function(_$ChatCompletionSystemMessageImpl) then) = - __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res>; +abstract class _$$ResponseFormatJsonObjectImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatJsonObjectImplCopyWith( + _$ResponseFormatJsonObjectImpl value, + $Res Function(_$ResponseFormatJsonObjectImpl) then) = + __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {ChatCompletionMessageRole role, - String content, - @JsonKey(includeIfNull: false) String? 
name}); + $Res call({ResponseFormatType type}); } /// @nodoc -class __$$ChatCompletionSystemMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionSystemMessageImpl> - implements _$$ChatCompletionSystemMessageImplCopyWith<$Res> { - __$$ChatCompletionSystemMessageImplCopyWithImpl( - _$ChatCompletionSystemMessageImpl _value, - $Res Function(_$ChatCompletionSystemMessageImpl) _then) +class __$$ResponseFormatJsonObjectImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonObjectImpl> + implements _$$ResponseFormatJsonObjectImplCopyWith<$Res> { + __$$ResponseFormatJsonObjectImplCopyWithImpl( + _$ResponseFormatJsonObjectImpl _value, + $Res Function(_$ResponseFormatJsonObjectImpl) _then) : super(_value, _then); + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? role = null, - Object? content = null, - Object? name = freezed, + Object? type = null, }) { - return _then(_$ChatCompletionSystemMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, + return _then(_$ResponseFormatJsonObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, )); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { - const _$ChatCompletionSystemMessageImpl( - {this.role = ChatCompletionMessageRole.system, - required this.content, - @JsonKey(includeIfNull: false) this.name}) +class _$ResponseFormatJsonObjectImpl extends ResponseFormatJsonObject { + const _$ResponseFormatJsonObjectImpl( + {this.type = ResponseFormatType.jsonObject}) : super._(); - factory _$ChatCompletionSystemMessageImpl.fromJson( - Map json) => - _$$ChatCompletionSystemMessageImplFromJson(json); + factory _$ResponseFormatJsonObjectImpl.fromJson(Map json) => + _$$ResponseFormatJsonObjectImplFromJson(json); - /// The role of the messages author, in this case `system`. + /// The type of response format being defined. @override @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the system message. - @override - final String content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @override - @JsonKey(includeIfNull: false) - final String? 
name; + final ResponseFormatType type; @override String toString() { - return 'ChatCompletionMessage.system(role: $role, content: $content, name: $name)'; + return 'ResponseFormat.jsonObject(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionSystemMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name)); + other is _$ResponseFormatJsonObjectImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, name); + int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> - get copyWith => __$$ChatCompletionSystemMessageImplCopyWithImpl< - _$ChatCompletionSystemMessageImpl>(this, _$identity); + _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> + get copyWith => __$$ResponseFormatJsonObjectImplCopyWithImpl< + _$ResponseFormatJsonObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, }) { - return system(role, content, name); + return jsonObject(type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? 
Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, }) { - return system?.call(role, content, name); + return jsonObject?.call(type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, required TResult orElse(), }) { - if (system != null) { - return system(role, content, name); + if (jsonObject != null) { + return jsonObject(type); } return orElse(); } @@ -50357,287 +58502,206 @@ class _$ChatCompletionSystemMessageImpl extends ChatCompletionSystemMessage { @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, }) { - return system(this); + return jsonObject(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, }) { - return system?.call(this); + return jsonObject?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? 
tool, - TResult Function(ChatCompletionFunctionMessage value)? function, + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, required TResult orElse(), }) { - if (system != null) { - return system(this); + if (jsonObject != null) { + return jsonObject(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionSystemMessageImplToJson( + return _$$ResponseFormatJsonObjectImplToJson( this, ); } } -abstract class ChatCompletionSystemMessage extends ChatCompletionMessage { - const factory ChatCompletionSystemMessage( - {final ChatCompletionMessageRole role, - required final String content, - @JsonKey(includeIfNull: false) final String? name}) = - _$ChatCompletionSystemMessageImpl; - const ChatCompletionSystemMessage._() : super._(); - - factory ChatCompletionSystemMessage.fromJson(Map json) = - _$ChatCompletionSystemMessageImpl.fromJson; +abstract class ResponseFormatJsonObject extends ResponseFormat { + const factory ResponseFormatJsonObject({final ResponseFormatType type}) = + _$ResponseFormatJsonObjectImpl; + const ResponseFormatJsonObject._() : super._(); - @override + factory ResponseFormatJsonObject.fromJson(Map json) = + _$ResponseFormatJsonObjectImpl.fromJson; - /// The role of the messages author, in this case `system`. - ChatCompletionMessageRole get role; + /// The type of response format being defined. @override + ResponseFormatType get type; - /// The contents of the system message. - String get content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @JsonKey(includeIfNull: false) - String? get name; + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$ChatCompletionSystemMessageImplCopyWith<_$ChatCompletionSystemMessageImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatJsonObjectImplCopyWith<_$ResponseFormatJsonObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionUserMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionUserMessageImplCopyWith( - _$ChatCompletionUserMessageImpl value, - $Res Function(_$ChatCompletionUserMessageImpl) then) = - __$$ChatCompletionUserMessageImplCopyWithImpl<$Res>; +abstract class _$$ResponseFormatJsonSchemaImplCopyWith<$Res> + implements $ResponseFormatCopyWith<$Res> { + factory _$$ResponseFormatJsonSchemaImplCopyWith( + _$ResponseFormatJsonSchemaImpl value, + $Res Function(_$ResponseFormatJsonSchemaImpl) then) = + __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res>; @override @useResult $Res call( - {ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? 
name}); + {ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema}); - $ChatCompletionUserMessageContentCopyWith<$Res> get content; + $JsonSchemaObjectCopyWith<$Res> get jsonSchema; } /// @nodoc -class __$$ChatCompletionUserMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionUserMessageImpl> - implements _$$ChatCompletionUserMessageImplCopyWith<$Res> { - __$$ChatCompletionUserMessageImplCopyWithImpl( - _$ChatCompletionUserMessageImpl _value, - $Res Function(_$ChatCompletionUserMessageImpl) _then) +class __$$ResponseFormatJsonSchemaImplCopyWithImpl<$Res> + extends _$ResponseFormatCopyWithImpl<$Res, _$ResponseFormatJsonSchemaImpl> + implements _$$ResponseFormatJsonSchemaImplCopyWith<$Res> { + __$$ResponseFormatJsonSchemaImplCopyWithImpl( + _$ResponseFormatJsonSchemaImpl _value, + $Res Function(_$ResponseFormatJsonSchemaImpl) _then) : super(_value, _then); + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? role = null, - Object? content = null, - Object? name = freezed, + Object? type = null, + Object? jsonSchema = null, }) { - return _then(_$ChatCompletionUserMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as ChatCompletionUserMessageContent, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, + return _then(_$ResponseFormatJsonSchemaImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as ResponseFormatType, + jsonSchema: null == jsonSchema + ? _value.jsonSchema + : jsonSchema // ignore: cast_nullable_to_non_nullable + as JsonSchemaObject, )); } + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $ChatCompletionUserMessageContentCopyWith<$Res> get content { - return $ChatCompletionUserMessageContentCopyWith<$Res>(_value.content, - (value) { - return _then(_value.copyWith(content: value)); + $JsonSchemaObjectCopyWith<$Res> get jsonSchema { + return $JsonSchemaObjectCopyWith<$Res>(_value.jsonSchema, (value) { + return _then(_value.copyWith(jsonSchema: value)); }); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { - const _$ChatCompletionUserMessageImpl( - {this.role = ChatCompletionMessageRole.user, - @_ChatCompletionUserMessageContentConverter() required this.content, - @JsonKey(includeIfNull: false) this.name}) +class _$ResponseFormatJsonSchemaImpl extends ResponseFormatJsonSchema { + const _$ResponseFormatJsonSchemaImpl( + {this.type = ResponseFormatType.jsonSchema, + @JsonKey(name: 'json_schema') required this.jsonSchema}) : super._(); - factory _$ChatCompletionUserMessageImpl.fromJson(Map json) => - _$$ChatCompletionUserMessageImplFromJson(json); + factory _$ResponseFormatJsonSchemaImpl.fromJson(Map json) => + _$$ResponseFormatJsonSchemaImplFromJson(json); - /// The role of the messages author, in this case `user`. + /// The type of response format being defined. @override @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the user message. 
- @override - @_ChatCompletionUserMessageContentConverter() - final ChatCompletionUserMessageContent content; + final ResponseFormatType type; - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + /// A JSON Schema object. @override - @JsonKey(includeIfNull: false) - final String? name; + @JsonKey(name: 'json_schema') + final JsonSchemaObject jsonSchema; @override String toString() { - return 'ChatCompletionMessage.user(role: $role, content: $content, name: $name)'; + return 'ResponseFormat.jsonSchema(type: $type, jsonSchema: $jsonSchema)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionUserMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name)); + other is _$ResponseFormatJsonSchemaImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.jsonSchema, jsonSchema) || + other.jsonSchema == jsonSchema)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, name); + int get hashCode => Object.hash(runtimeType, type, jsonSchema); - @JsonKey(ignore: true) + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> - get copyWith => __$$ChatCompletionUserMessageImplCopyWithImpl< - _$ChatCompletionUserMessageImpl>(this, _$identity); + _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> + get copyWith => __$$ResponseFormatJsonSchemaImplCopyWithImpl< + _$ResponseFormatJsonSchemaImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, - required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, + required TResult Function(ResponseFormatType type) text, + required TResult Function(ResponseFormatType type) jsonObject, + required TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema) + jsonSchema, }) { - return user(role, content, name); + return jsonSchema(type, this.jsonSchema); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? 
Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + TResult? Function(ResponseFormatType type)? text, + TResult? Function(ResponseFormatType type)? jsonObject, + TResult? Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, }) { - return user?.call(role, content, name); + return jsonSchema?.call(type, this.jsonSchema); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, - TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + TResult Function(ResponseFormatType type)? text, + TResult Function(ResponseFormatType type)? jsonObject, + TResult Function(ResponseFormatType type, + @JsonKey(name: 'json_schema') JsonSchemaObject jsonSchema)? + jsonSchema, required TResult orElse(), }) { - if (user != null) { - return user(role, content, name); + if (jsonSchema != null) { + return jsonSchema(type, this.jsonSchema); } return orElse(); } @@ -50645,337 +58709,317 @@ class _$ChatCompletionUserMessageImpl extends ChatCompletionUserMessage { @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, + required TResult Function(ResponseFormatText value) text, + required TResult Function(ResponseFormatJsonObject value) jsonObject, + required TResult Function(ResponseFormatJsonSchema value) jsonSchema, }) { - return user(this); + return jsonSchema(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? 
Function(ChatCompletionFunctionMessage value)? function, + TResult? Function(ResponseFormatText value)? text, + TResult? Function(ResponseFormatJsonObject value)? jsonObject, + TResult? Function(ResponseFormatJsonSchema value)? jsonSchema, }) { - return user?.call(this); + return jsonSchema?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, + TResult Function(ResponseFormatText value)? text, + TResult Function(ResponseFormatJsonObject value)? jsonObject, + TResult Function(ResponseFormatJsonSchema value)? jsonSchema, required TResult orElse(), }) { - if (user != null) { - return user(this); + if (jsonSchema != null) { + return jsonSchema(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionUserMessageImplToJson( + return _$$ResponseFormatJsonSchemaImplToJson( this, ); } } -abstract class ChatCompletionUserMessage extends ChatCompletionMessage { - const factory ChatCompletionUserMessage( - {final ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - required final ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) final String? name}) = - _$ChatCompletionUserMessageImpl; - const ChatCompletionUserMessage._() : super._(); - - factory ChatCompletionUserMessage.fromJson(Map json) = - _$ChatCompletionUserMessageImpl.fromJson; +abstract class ResponseFormatJsonSchema extends ResponseFormat { + const factory ResponseFormatJsonSchema( + {final ResponseFormatType type, + @JsonKey(name: 'json_schema') + required final JsonSchemaObject jsonSchema}) = + _$ResponseFormatJsonSchemaImpl; + const ResponseFormatJsonSchema._() : super._(); - @override + factory ResponseFormatJsonSchema.fromJson(Map json) = + _$ResponseFormatJsonSchemaImpl.fromJson; - /// The role of the messages author, in this case `user`. - ChatCompletionMessageRole get role; + /// The type of response format being defined. @override + ResponseFormatType get type; - /// The contents of the user message. - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent get content; + /// A JSON Schema object. + @JsonKey(name: 'json_schema') + JsonSchemaObject get jsonSchema; - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @JsonKey(includeIfNull: false) - String? get name; + /// Create a copy of ResponseFormat + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$ChatCompletionUserMessageImplCopyWith<_$ChatCompletionUserMessageImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$ResponseFormatJsonSchemaImplCopyWith<_$ResponseFormatJsonSchemaImpl> get copyWith => throw _privateConstructorUsedError; } +AssistantTools _$AssistantToolsFromJson(Map json) { + switch (json['type']) { + case 'code_interpreter': + return AssistantToolsCodeInterpreter.fromJson(json); + case 'file_search': + return AssistantToolsFileSearch.fromJson(json); + case 'function': + return AssistantToolsFunction.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'AssistantTools', + 'Invalid union type "${json['type']}"!'); + } +} + /// @nodoc -abstract class _$$ChatCompletionAssistantMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionAssistantMessageImplCopyWith( - _$ChatCompletionAssistantMessageImpl value, - $Res Function(_$ChatCompletionAssistantMessageImpl) then) = - __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res>; +mixin _$AssistantTools { + /// The type of tool being defined: `code_interpreter` + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String type) codeInterpreter, + required TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, + required TResult Function(String type, FunctionObject function) function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type)? codeInterpreter, + TResult? Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult? Function(String type, FunctionObject function)? function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type)? codeInterpreter, + TResult Function( + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult Function(String type, FunctionObject function)? function, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this AssistantTools to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $AssistantToolsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $AssistantToolsCopyWith<$Res> { + factory $AssistantToolsCopyWith( + AssistantTools value, $Res Function(AssistantTools) then) = + _$AssistantToolsCopyWithImpl<$Res, AssistantTools>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$AssistantToolsCopyWithImpl<$Res, $Val extends AssistantTools> + implements $AssistantToolsCopyWith<$Res> { + _$AssistantToolsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> + implements $AssistantToolsCopyWith<$Res> { + factory _$$AssistantToolsCodeInterpreterImplCopyWith( + _$AssistantToolsCodeInterpreterImpl value, + $Res Function(_$AssistantToolsCodeInterpreterImpl) then) = + __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall}); - - $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall; + $Res call({String type}); } /// @nodoc -class __$$ChatCompletionAssistantMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionAssistantMessageImpl> - implements _$$ChatCompletionAssistantMessageImplCopyWith<$Res> { - __$$ChatCompletionAssistantMessageImplCopyWithImpl( - _$ChatCompletionAssistantMessageImpl _value, - $Res Function(_$ChatCompletionAssistantMessageImpl) _then) +class __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res> + extends _$AssistantToolsCopyWithImpl<$Res, + _$AssistantToolsCodeInterpreterImpl> + implements _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> { + __$$AssistantToolsCodeInterpreterImplCopyWithImpl( + _$AssistantToolsCodeInterpreterImpl _value, + $Res Function(_$AssistantToolsCodeInterpreterImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? role = null, - Object? content = freezed, - Object? name = freezed, - Object? toolCalls = freezed, - Object? functionCall = freezed, + Object? type = null, }) { - return _then(_$ChatCompletionAssistantMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: freezed == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String?, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - toolCalls: freezed == toolCalls - ? _value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List?, - functionCall: freezed == functionCall - ? 
_value.functionCall - : functionCall // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageFunctionCall?, + return _then(_$AssistantToolsCodeInterpreterImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, )); } - - @override - @pragma('vm:prefer-inline') - $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { - if (_value.functionCall == null) { - return null; - } - - return $ChatCompletionMessageFunctionCallCopyWith<$Res>( - _value.functionCall!, (value) { - return _then(_value.copyWith(functionCall: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$ChatCompletionAssistantMessageImpl - extends ChatCompletionAssistantMessage { - const _$ChatCompletionAssistantMessageImpl( - {this.role = ChatCompletionMessageRole.assistant, - @JsonKey(includeIfNull: false) this.content, - @JsonKey(includeIfNull: false) this.name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall}) - : _toolCalls = toolCalls, - super._(); +class _$AssistantToolsCodeInterpreterImpl + extends AssistantToolsCodeInterpreter { + const _$AssistantToolsCodeInterpreterImpl({this.type = 'code_interpreter'}) + : super._(); - factory _$ChatCompletionAssistantMessageImpl.fromJson( + factory _$AssistantToolsCodeInterpreterImpl.fromJson( Map json) => - _$$ChatCompletionAssistantMessageImplFromJson(json); + _$$AssistantToolsCodeInterpreterImplFromJson(json); - /// The role of the messages author, in this case `assistant`. + /// The type of tool being defined: `code_interpreter` @override @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - @override - @JsonKey(includeIfNull: false) - final String? content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @override - @JsonKey(includeIfNull: false) - final String? name; - - /// No Description - final List? _toolCalls; - - /// No Description - @override - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls { - final value = _toolCalls; - if (value == null) return null; - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } - - /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. - @override - @JsonKey(name: 'function_call', includeIfNull: false) - final ChatCompletionMessageFunctionCall? 
functionCall; + final String type; @override String toString() { - return 'ChatCompletionMessage.assistant(role: $role, content: $content, name: $name, toolCalls: $toolCalls, functionCall: $functionCall)'; + return 'AssistantTools.codeInterpreter(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionAssistantMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls) && - (identical(other.functionCall, functionCall) || - other.functionCall == functionCall)); + other is _$AssistantToolsCodeInterpreterImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, name, - const DeepCollectionEquality().hash(_toolCalls), functionCall); + int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionAssistantMessageImplCopyWith< - _$ChatCompletionAssistantMessageImpl> - get copyWith => __$$ChatCompletionAssistantMessageImplCopyWithImpl< - _$ChatCompletionAssistantMessageImpl>(this, _$identity); + _$$AssistantToolsCodeInterpreterImplCopyWith< + _$AssistantToolsCodeInterpreterImpl> + get copyWith => __$$AssistantToolsCodeInterpreterImplCopyWithImpl< + _$AssistantToolsCodeInterpreterImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, + required TResult Function(String type) codeInterpreter, required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, + required TResult Function(String type, FunctionObject function) function, }) { - return assistant(role, content, name, toolCalls, functionCall); + return codeInterpreter(type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? 
- user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, + TResult? Function(String type)? codeInterpreter, TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult? Function(String type, FunctionObject function)? function, }) { - return assistant?.call(role, content, name, toolCalls, functionCall); + return codeInterpreter?.call(type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, + TResult Function(String type)? codeInterpreter, TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult Function(String type, FunctionObject function)? function, required TResult orElse(), }) { - if (assistant != null) { - return assistant(role, content, name, toolCalls, functionCall); + if (codeInterpreter != null) { + return codeInterpreter(type); } return orElse(); } @@ -50983,289 +59027,219 @@ class _$ChatCompletionAssistantMessageImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, }) { - return assistant(this); + return codeInterpreter(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, + TResult? Function(AssistantToolsCodeInterpreter value)? 
codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, }) { - return assistant?.call(this); + return codeInterpreter?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, required TResult orElse(), }) { - if (assistant != null) { - return assistant(this); + if (codeInterpreter != null) { + return codeInterpreter(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionAssistantMessageImplToJson( + return _$$AssistantToolsCodeInterpreterImplToJson( this, ); } } -abstract class ChatCompletionAssistantMessage extends ChatCompletionMessage { - const factory ChatCompletionAssistantMessage( - {final ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) final String? content, - @JsonKey(includeIfNull: false) final String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - final ChatCompletionMessageFunctionCall? functionCall}) = - _$ChatCompletionAssistantMessageImpl; - const ChatCompletionAssistantMessage._() : super._(); - - factory ChatCompletionAssistantMessage.fromJson(Map json) = - _$ChatCompletionAssistantMessageImpl.fromJson; +abstract class AssistantToolsCodeInterpreter extends AssistantTools { + const factory AssistantToolsCodeInterpreter({final String type}) = + _$AssistantToolsCodeInterpreterImpl; + const AssistantToolsCodeInterpreter._() : super._(); - @override + factory AssistantToolsCodeInterpreter.fromJson(Map json) = + _$AssistantToolsCodeInterpreterImpl.fromJson; - /// The role of the messages author, in this case `assistant`. - ChatCompletionMessageRole get role; + /// The type of tool being defined: `code_interpreter` @override + String get type; - /// The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - @JsonKey(includeIfNull: false) - String? get content; - - /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. - @JsonKey(includeIfNull: false) - String? get name; - - /// No Description - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls; - - /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? get functionCall; + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$ChatCompletionAssistantMessageImplCopyWith< - _$ChatCompletionAssistantMessageImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AssistantToolsCodeInterpreterImplCopyWith< + _$AssistantToolsCodeInterpreterImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionToolMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionToolMessageImplCopyWith( - _$ChatCompletionToolMessageImpl value, - $Res Function(_$ChatCompletionToolMessageImpl) then) = - __$$ChatCompletionToolMessageImplCopyWithImpl<$Res>; +abstract class _$$AssistantToolsFileSearchImplCopyWith<$Res> + implements $AssistantToolsCopyWith<$Res> { + factory _$$AssistantToolsFileSearchImplCopyWith( + _$AssistantToolsFileSearchImpl value, + $Res Function(_$AssistantToolsFileSearchImpl) then) = + __$$AssistantToolsFileSearchImplCopyWithImpl<$Res>; @override @useResult $Res call( - {ChatCompletionMessageRole role, - String content, - @JsonKey(name: 'tool_call_id') String toolCallId}); + {String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch}); + + $AssistantToolsFileSearchFileSearchCopyWith<$Res>? get fileSearch; } /// @nodoc -class __$$ChatCompletionToolMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionToolMessageImpl> - implements _$$ChatCompletionToolMessageImplCopyWith<$Res> { - __$$ChatCompletionToolMessageImplCopyWithImpl( - _$ChatCompletionToolMessageImpl _value, - $Res Function(_$ChatCompletionToolMessageImpl) _then) +class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> + extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFileSearchImpl> + implements _$$AssistantToolsFileSearchImplCopyWith<$Res> { + __$$AssistantToolsFileSearchImplCopyWithImpl( + _$AssistantToolsFileSearchImpl _value, + $Res Function(_$AssistantToolsFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? role = null, - Object? content = null, - Object? toolCallId = null, + Object? type = null, + Object? fileSearch = freezed, }) { - return _then(_$ChatCompletionToolMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: null == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String, - toolCallId: null == toolCallId - ? _value.toolCallId - : toolCallId // ignore: cast_nullable_to_non_nullable + return _then(_$AssistantToolsFileSearchImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, + fileSearch: freezed == fileSearch + ? _value.fileSearch + : fileSearch // ignore: cast_nullable_to_non_nullable + as AssistantToolsFileSearchFileSearch?, )); } + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $AssistantToolsFileSearchFileSearchCopyWith<$Res>? 
get fileSearch { + if (_value.fileSearch == null) { + return null; + } + + return $AssistantToolsFileSearchFileSearchCopyWith<$Res>(_value.fileSearch!, + (value) { + return _then(_value.copyWith(fileSearch: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { - const _$ChatCompletionToolMessageImpl( - {this.role = ChatCompletionMessageRole.tool, - required this.content, - @JsonKey(name: 'tool_call_id') required this.toolCallId}) +class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { + const _$AssistantToolsFileSearchImpl( + {required this.type, + @JsonKey(name: 'file_search', includeIfNull: false) this.fileSearch}) : super._(); - factory _$ChatCompletionToolMessageImpl.fromJson(Map json) => - _$$ChatCompletionToolMessageImplFromJson(json); - - /// The role of the messages author, in this case `tool`. - @override - @JsonKey() - final ChatCompletionMessageRole role; + factory _$AssistantToolsFileSearchImpl.fromJson(Map json) => + _$$AssistantToolsFileSearchImplFromJson(json); - /// The contents of the tool message. + /// The type of tool being defined: `file_search` @override - final String content; + final String type; - /// Tool call that this message is responding to. + /// Overrides for the file search tool. @override - @JsonKey(name: 'tool_call_id') - final String toolCallId; + @JsonKey(name: 'file_search', includeIfNull: false) + final AssistantToolsFileSearchFileSearch? fileSearch; @override String toString() { - return 'ChatCompletionMessage.tool(role: $role, content: $content, toolCallId: $toolCallId)'; + return 'AssistantTools.fileSearch(type: $type, fileSearch: $fileSearch)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionToolMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.toolCallId, toolCallId) || - other.toolCallId == toolCallId)); + other is _$AssistantToolsFileSearchImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.fileSearch, fileSearch) || + other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, toolCallId); + int get hashCode => Object.hash(runtimeType, type, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> - get copyWith => __$$ChatCompletionToolMessageImplCopyWithImpl< - _$ChatCompletionToolMessageImpl>(this, _$identity); + _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> + get copyWith => __$$AssistantToolsFileSearchImplCopyWithImpl< + _$AssistantToolsFileSearchImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? 
name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, + required TResult Function(String type) codeInterpreter, required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, + required TResult Function(String type, FunctionObject function) function, }) { - return tool(role, content, toolCallId); + return fileSearch(type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, + TResult? Function(String type)? codeInterpreter, TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult? Function(String type, FunctionObject function)? function, }) { - return tool?.call(role, content, toolCallId); + return fileSearch?.call(type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, + TResult Function(String type)? codeInterpreter, TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult Function(String type, FunctionObject function)? 
function, required TResult orElse(), - }) { - if (tool != null) { - return tool(role, content, toolCallId); + }) { + if (fileSearch != null) { + return fileSearch(type, this.fileSearch); } return orElse(); } @@ -51273,273 +59247,216 @@ class _$ChatCompletionToolMessageImpl extends ChatCompletionToolMessage { @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, }) { - return tool(this); + return fileSearch(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, }) { - return tool?.call(this); + return fileSearch?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, required TResult orElse(), }) { - if (tool != null) { - return tool(this); + if (fileSearch != null) { + return fileSearch(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionToolMessageImplToJson( + return _$$AssistantToolsFileSearchImplToJson( this, ); } } -abstract class ChatCompletionToolMessage extends ChatCompletionMessage { - const factory ChatCompletionToolMessage( - {final ChatCompletionMessageRole role, - required final String content, - @JsonKey(name: 'tool_call_id') required final String toolCallId}) = - _$ChatCompletionToolMessageImpl; - const ChatCompletionToolMessage._() : super._(); - - factory ChatCompletionToolMessage.fromJson(Map json) = - _$ChatCompletionToolMessageImpl.fromJson; +abstract class AssistantToolsFileSearch extends AssistantTools { + const factory AssistantToolsFileSearch( + {required final String type, + @JsonKey(name: 'file_search', includeIfNull: false) + final AssistantToolsFileSearchFileSearch? fileSearch}) = + _$AssistantToolsFileSearchImpl; + const AssistantToolsFileSearch._() : super._(); - @override + factory AssistantToolsFileSearch.fromJson(Map json) = + _$AssistantToolsFileSearchImpl.fromJson; - /// The role of the messages author, in this case `tool`. 
- ChatCompletionMessageRole get role; + /// The type of tool being defined: `file_search` @override + String get type; - /// The contents of the tool message. - String get content; + /// Overrides for the file search tool. + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? get fileSearch; - /// Tool call that this message is responding to. - @JsonKey(name: 'tool_call_id') - String get toolCallId; + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$ChatCompletionToolMessageImplCopyWith<_$ChatCompletionToolMessageImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionFunctionMessageImplCopyWith<$Res> - implements $ChatCompletionMessageCopyWith<$Res> { - factory _$$ChatCompletionFunctionMessageImplCopyWith( - _$ChatCompletionFunctionMessageImpl value, - $Res Function(_$ChatCompletionFunctionMessageImpl) then) = - __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res>; +abstract class _$$AssistantToolsFunctionImplCopyWith<$Res> + implements $AssistantToolsCopyWith<$Res> { + factory _$$AssistantToolsFunctionImplCopyWith( + _$AssistantToolsFunctionImpl value, + $Res Function(_$AssistantToolsFunctionImpl) then) = + __$$AssistantToolsFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call({ChatCompletionMessageRole role, String? content, String name}); + $Res call({String type, FunctionObject function}); + + $FunctionObjectCopyWith<$Res> get function; } /// @nodoc -class __$$ChatCompletionFunctionMessageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageCopyWithImpl<$Res, - _$ChatCompletionFunctionMessageImpl> - implements _$$ChatCompletionFunctionMessageImplCopyWith<$Res> { - __$$ChatCompletionFunctionMessageImplCopyWithImpl( - _$ChatCompletionFunctionMessageImpl _value, - $Res Function(_$ChatCompletionFunctionMessageImpl) _then) +class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> + extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFunctionImpl> + implements _$$AssistantToolsFunctionImplCopyWith<$Res> { + __$$AssistantToolsFunctionImplCopyWithImpl( + _$AssistantToolsFunctionImpl _value, + $Res Function(_$AssistantToolsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? role = null, - Object? content = freezed, - Object? name = null, + Object? type = null, + Object? function = null, }) { - return _then(_$ChatCompletionFunctionMessageImpl( - role: null == role - ? _value.role - : role // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageRole, - content: freezed == content - ? _value.content - : content // ignore: cast_nullable_to_non_nullable - as String?, - name: null == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable + return _then(_$AssistantToolsFunctionImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, + function: null == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as FunctionObject, )); } + + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $FunctionObjectCopyWith<$Res> get function { + return $FunctionObjectCopyWith<$Res>(_value.function, (value) { + return _then(_value.copyWith(function: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$ChatCompletionFunctionMessageImpl - extends ChatCompletionFunctionMessage { - const _$ChatCompletionFunctionMessageImpl( - {this.role = ChatCompletionMessageRole.function, - required this.content, - required this.name}) +class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { + const _$AssistantToolsFunctionImpl( + {this.type = 'function', required this.function}) : super._(); - factory _$ChatCompletionFunctionMessageImpl.fromJson( - Map json) => - _$$ChatCompletionFunctionMessageImplFromJson(json); + factory _$AssistantToolsFunctionImpl.fromJson(Map json) => + _$$AssistantToolsFunctionImplFromJson(json); - /// The role of the messages author, in this case `function`. + /// The type of tool being defined: `function` @override @JsonKey() - final ChatCompletionMessageRole role; - - /// The contents of the function message. - @override - final String? content; + final String type; - /// The name of the function to call. + /// A function that the model may call. @override - final String name; + final FunctionObject function; @override String toString() { - return 'ChatCompletionMessage.function(role: $role, content: $content, name: $name)'; + return 'AssistantTools.function(type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionFunctionMessageImpl && - (identical(other.role, role) || other.role == role) && - (identical(other.content, content) || other.content == content) && - (identical(other.name, name) || other.name == name)); + other is _$AssistantToolsFunctionImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.function, function) || + other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, role, content, name); + int get hashCode => Object.hash(runtimeType, type, function); - @JsonKey(ignore: true) + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionFunctionMessageImplCopyWith< - _$ChatCompletionFunctionMessageImpl> - get copyWith => __$$ChatCompletionFunctionMessageImplCopyWithImpl< - _$ChatCompletionFunctionMessageImpl>(this, _$identity); + _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> + get copyWith => __$$AssistantToolsFunctionImplCopyWithImpl< + _$AssistantToolsFunctionImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name) - system, - required TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name) - user, - required TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? 
toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall) - assistant, - required TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId) - tool, + required TResult Function(String type) codeInterpreter, required TResult Function( - ChatCompletionMessageRole role, String? content, String name) - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch) + fileSearch, + required TResult Function(String type, FunctionObject function) function, }) { - return function(role, content, name); + return function(type, this.function); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult? Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult? Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult? Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, + TResult? Function(String type)? codeInterpreter, TResult? Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult? Function(String type, FunctionObject function)? function, }) { - return function?.call(role, content, name); + return function?.call(type, this.function); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(includeIfNull: false) String? name)? - system, - TResult Function( - ChatCompletionMessageRole role, - @_ChatCompletionUserMessageContentConverter() - ChatCompletionUserMessageContent content, - @JsonKey(includeIfNull: false) String? name)? - user, - TResult Function( - ChatCompletionMessageRole role, - @JsonKey(includeIfNull: false) String? content, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls, - @JsonKey(name: 'function_call', includeIfNull: false) - ChatCompletionMessageFunctionCall? functionCall)? - assistant, - TResult Function(ChatCompletionMessageRole role, String content, - @JsonKey(name: 'tool_call_id') String toolCallId)? - tool, + TResult Function(String type)? codeInterpreter, TResult Function( - ChatCompletionMessageRole role, String? content, String name)? - function, + String type, + @JsonKey(name: 'file_search', includeIfNull: false) + AssistantToolsFileSearchFileSearch? fileSearch)? + fileSearch, + TResult Function(String type, FunctionObject function)? 
function, required TResult orElse(), }) { if (function != null) { - return function(role, content, name); + return function(type, this.function); } return orElse(); } @@ -51547,11 +59464,10 @@ class _$ChatCompletionFunctionMessageImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionSystemMessage value) system, - required TResult Function(ChatCompletionUserMessage value) user, - required TResult Function(ChatCompletionAssistantMessage value) assistant, - required TResult Function(ChatCompletionToolMessage value) tool, - required TResult Function(ChatCompletionFunctionMessage value) function, + required TResult Function(AssistantToolsCodeInterpreter value) + codeInterpreter, + required TResult Function(AssistantToolsFileSearch value) fileSearch, + required TResult Function(AssistantToolsFunction value) function, }) { return function(this); } @@ -51559,11 +59475,9 @@ class _$ChatCompletionFunctionMessageImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionSystemMessage value)? system, - TResult? Function(ChatCompletionUserMessage value)? user, - TResult? Function(ChatCompletionAssistantMessage value)? assistant, - TResult? Function(ChatCompletionToolMessage value)? tool, - TResult? Function(ChatCompletionFunctionMessage value)? function, + TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult? Function(AssistantToolsFileSearch value)? fileSearch, + TResult? Function(AssistantToolsFunction value)? function, }) { return function?.call(this); } @@ -51571,11 +59485,9 @@ class _$ChatCompletionFunctionMessageImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionSystemMessage value)? system, - TResult Function(ChatCompletionUserMessage value)? user, - TResult Function(ChatCompletionAssistantMessage value)? assistant, - TResult Function(ChatCompletionToolMessage value)? tool, - TResult Function(ChatCompletionFunctionMessage value)? function, + TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, + TResult Function(AssistantToolsFileSearch value)? fileSearch, + TResult Function(AssistantToolsFunction value)? function, required TResult orElse(), }) { if (function != null) { @@ -51586,569 +59498,415 @@ class _$ChatCompletionFunctionMessageImpl @override Map toJson() { - return _$$ChatCompletionFunctionMessageImplToJson( + return _$$AssistantToolsFunctionImplToJson( this, ); } } -abstract class ChatCompletionFunctionMessage extends ChatCompletionMessage { - const factory ChatCompletionFunctionMessage( - {final ChatCompletionMessageRole role, - required final String? content, - required final String name}) = _$ChatCompletionFunctionMessageImpl; - const ChatCompletionFunctionMessage._() : super._(); - - factory ChatCompletionFunctionMessage.fromJson(Map json) = - _$ChatCompletionFunctionMessageImpl.fromJson; +abstract class AssistantToolsFunction extends AssistantTools { + const factory AssistantToolsFunction( + {final String type, + required final FunctionObject function}) = _$AssistantToolsFunctionImpl; + const AssistantToolsFunction._() : super._(); - @override + factory AssistantToolsFunction.fromJson(Map json) = + _$AssistantToolsFunctionImpl.fromJson; - /// The role of the messages author, in this case `function`. - ChatCompletionMessageRole get role; + /// The type of tool being defined: `function` @override + String get type; - /// The contents of the function message. - String? get content; + /// A function that the model may call. 
+ FunctionObject get function; - /// The name of the function to call. - String get name; + /// Create a copy of AssistantTools + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$ChatCompletionFunctionMessageImplCopyWith< - _$ChatCompletionFunctionMessageImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } -ChatCompletionUserMessageContent _$ChatCompletionUserMessageContentFromJson( +AssistantToolsFileSearchFileSearch _$AssistantToolsFileSearchFileSearchFromJson( Map json) { - switch (json['runtimeType']) { - case 'parts': - return ChatCompletionMessageContentParts.fromJson(json); - case 'string': - return ChatCompletionUserMessageContentString.fromJson(json); - - default: - throw CheckedFromJsonException( - json, - 'runtimeType', - 'ChatCompletionUserMessageContent', - 'Invalid union type "${json['runtimeType']}"!'); - } + return _AssistantToolsFileSearchFileSearch.fromJson(json); } /// @nodoc -mixin _$ChatCompletionUserMessageContent { - Object get value => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(List value) - parts, - required TResult Function(String value) string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? parts, - TResult? Function(String value)? string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? parts, - TResult Function(String value)? string, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentParts value) parts, - required TResult Function(ChatCompletionUserMessageContentString value) - string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentParts value)? parts, - TResult? Function(ChatCompletionUserMessageContentString value)? string, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentParts value)? parts, - TResult Function(ChatCompletionUserMessageContentString value)? string, - required TResult orElse(), - }) => +mixin _$AssistantToolsFileSearchFileSearch { + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'max_num_results', includeIfNull: false) + int? get maxNumResults => throw _privateConstructorUsedError; + + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? 
get rankingOptions => throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $ChatCompletionUserMessageContentCopyWith<$Res> { - factory $ChatCompletionUserMessageContentCopyWith( - ChatCompletionUserMessageContent value, - $Res Function(ChatCompletionUserMessageContent) then) = - _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - ChatCompletionUserMessageContent>; -} - -/// @nodoc -class _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - $Val extends ChatCompletionUserMessageContent> - implements $ChatCompletionUserMessageContentCopyWith<$Res> { - _$ChatCompletionUserMessageContentCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; -} - -/// @nodoc -abstract class _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartsImplCopyWith( - _$ChatCompletionMessageContentPartsImpl value, - $Res Function(_$ChatCompletionMessageContentPartsImpl) then) = - __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res>; - @useResult - $Res call({List value}); -} -/// @nodoc -class __$$ChatCompletionMessageContentPartsImplCopyWithImpl<$Res> - extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartsImpl> - implements _$$ChatCompletionMessageContentPartsImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartsImplCopyWithImpl( - _$ChatCompletionMessageContentPartsImpl _value, - $Res Function(_$ChatCompletionMessageContentPartsImpl) _then) - : super(_value, _then); + /// Serializes this AssistantToolsFileSearchFileSearch to a JSON map. + Map toJson() => throw _privateConstructorUsedError; - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? value = null, - }) { - return _then(_$ChatCompletionMessageContentPartsImpl( - null == value - ? _value._value - : value // ignore: cast_nullable_to_non_nullable - as List, - )); - } + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $AssistantToolsFileSearchFileSearchCopyWith< + AssistantToolsFileSearchFileSearch> + get copyWith => throw _privateConstructorUsedError; } -/// @nodoc -@JsonSerializable() -class _$ChatCompletionMessageContentPartsImpl - extends ChatCompletionMessageContentParts { - const _$ChatCompletionMessageContentPartsImpl( - final List value, - {final String? $type}) - : _value = value, - $type = $type ?? 
'parts', - super._(); - - factory _$ChatCompletionMessageContentPartsImpl.fromJson( - Map json) => - _$$ChatCompletionMessageContentPartsImplFromJson(json); - - final List _value; - @override - List get value { - if (_value is EqualUnmodifiableListView) return _value; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_value); - } - - @JsonKey(name: 'runtimeType') - final String $type; - - @override - String toString() { - return 'ChatCompletionUserMessageContent.parts(value: $value)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartsImpl && - const DeepCollectionEquality().equals(other._value, _value)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => - Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartsImplCopyWith< - _$ChatCompletionMessageContentPartsImpl> - get copyWith => __$$ChatCompletionMessageContentPartsImplCopyWithImpl< - _$ChatCompletionMessageContentPartsImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) - parts, - required TResult Function(String value) string, - }) { - return parts(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? parts, - TResult? Function(String value)? string, - }) { - return parts?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? parts, - TResult Function(String value)? string, - required TResult orElse(), - }) { - if (parts != null) { - return parts(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentParts value) parts, - required TResult Function(ChatCompletionUserMessageContentString value) - string, - }) { - return parts(this); - } +/// @nodoc +abstract class $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + factory $AssistantToolsFileSearchFileSearchCopyWith( + AssistantToolsFileSearchFileSearch value, + $Res Function(AssistantToolsFileSearchFileSearch) then) = + _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + AssistantToolsFileSearchFileSearch>; + @useResult + $Res call( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions}); + + $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions; +} + +/// @nodoc +class _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + $Val extends AssistantToolsFileSearchFileSearch> + implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + _$AssistantToolsFileSearchFileSearchCopyWithImpl(this._value, this._then); + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentParts value)? parts, - TResult? Function(ChatCompletionUserMessageContentString value)? string, + $Res call({ + Object? maxNumResults = freezed, + Object? 
rankingOptions = freezed, }) { - return parts?.call(this); + return _then(_value.copyWith( + maxNumResults: freezed == maxNumResults + ? _value.maxNumResults + : maxNumResults // ignore: cast_nullable_to_non_nullable + as int?, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as FileSearchRankingOptions?, + ) as $Val); } + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentParts value)? parts, - TResult Function(ChatCompletionUserMessageContentString value)? string, - required TResult orElse(), - }) { - if (parts != null) { - return parts(this); + @pragma('vm:prefer-inline') + $FileSearchRankingOptionsCopyWith<$Res>? get rankingOptions { + if (_value.rankingOptions == null) { + return null; } - return orElse(); - } - @override - Map toJson() { - return _$$ChatCompletionMessageContentPartsImplToJson( - this, - ); + return $FileSearchRankingOptionsCopyWith<$Res>(_value.rankingOptions!, + (value) { + return _then(_value.copyWith(rankingOptions: value) as $Val); + }); } } -abstract class ChatCompletionMessageContentParts - extends ChatCompletionUserMessageContent { - const factory ChatCompletionMessageContentParts( - final List value) = - _$ChatCompletionMessageContentPartsImpl; - const ChatCompletionMessageContentParts._() : super._(); - - factory ChatCompletionMessageContentParts.fromJson( - Map json) = - _$ChatCompletionMessageContentPartsImpl.fromJson; - - @override - List get value; - @JsonKey(ignore: true) - _$$ChatCompletionMessageContentPartsImplCopyWith< - _$ChatCompletionMessageContentPartsImpl> - get copyWith => throw _privateConstructorUsedError; -} - /// @nodoc -abstract class _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { - factory _$$ChatCompletionUserMessageContentStringImplCopyWith( - _$ChatCompletionUserMessageContentStringImpl value, - $Res Function(_$ChatCompletionUserMessageContentStringImpl) then) = - __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res>; +abstract class _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> + implements $AssistantToolsFileSearchFileSearchCopyWith<$Res> { + factory _$$AssistantToolsFileSearchFileSearchImplCopyWith( + _$AssistantToolsFileSearchFileSearchImpl value, + $Res Function(_$AssistantToolsFileSearchFileSearchImpl) then) = + __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res>; + @override @useResult - $Res call({String value}); + $Res call( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? rankingOptions}); + + @override + $FileSearchRankingOptionsCopyWith<$Res>? 
get rankingOptions; } /// @nodoc -class __$$ChatCompletionUserMessageContentStringImplCopyWithImpl<$Res> - extends _$ChatCompletionUserMessageContentCopyWithImpl<$Res, - _$ChatCompletionUserMessageContentStringImpl> - implements _$$ChatCompletionUserMessageContentStringImplCopyWith<$Res> { - __$$ChatCompletionUserMessageContentStringImplCopyWithImpl( - _$ChatCompletionUserMessageContentStringImpl _value, - $Res Function(_$ChatCompletionUserMessageContentStringImpl) _then) +class __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl<$Res> + extends _$AssistantToolsFileSearchFileSearchCopyWithImpl<$Res, + _$AssistantToolsFileSearchFileSearchImpl> + implements _$$AssistantToolsFileSearchFileSearchImplCopyWith<$Res> { + __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl( + _$AssistantToolsFileSearchFileSearchImpl _value, + $Res Function(_$AssistantToolsFileSearchFileSearchImpl) _then) : super(_value, _then); + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? value = null, + Object? maxNumResults = freezed, + Object? rankingOptions = freezed, }) { - return _then(_$ChatCompletionUserMessageContentStringImpl( - null == value - ? _value.value - : value // ignore: cast_nullable_to_non_nullable - as String, + return _then(_$AssistantToolsFileSearchFileSearchImpl( + maxNumResults: freezed == maxNumResults + ? _value.maxNumResults + : maxNumResults // ignore: cast_nullable_to_non_nullable + as int?, + rankingOptions: freezed == rankingOptions + ? _value.rankingOptions + : rankingOptions // ignore: cast_nullable_to_non_nullable + as FileSearchRankingOptions?, )); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionUserMessageContentStringImpl - extends ChatCompletionUserMessageContentString { - const _$ChatCompletionUserMessageContentStringImpl(this.value, - {final String? $type}) - : $type = $type ?? 'string', - super._(); +class _$AssistantToolsFileSearchFileSearchImpl + extends _AssistantToolsFileSearchFileSearch { + const _$AssistantToolsFileSearchFileSearchImpl( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + this.maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + this.rankingOptions}) + : super._(); - factory _$ChatCompletionUserMessageContentStringImpl.fromJson( + factory _$AssistantToolsFileSearchFileSearchImpl.fromJson( Map json) => - _$$ChatCompletionUserMessageContentStringImplFromJson(json); + _$$AssistantToolsFileSearchFileSearchImplFromJson(json); + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @override - final String value; + @JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults; - @JsonKey(name: 'runtimeType') - final String $type; + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. 
+ @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + final FileSearchRankingOptions? rankingOptions; @override String toString() { - return 'ChatCompletionUserMessageContent.string(value: $value)'; + return 'AssistantToolsFileSearchFileSearch(maxNumResults: $maxNumResults, rankingOptions: $rankingOptions)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionUserMessageContentStringImpl && - (identical(other.value, value) || other.value == value)); + other is _$AssistantToolsFileSearchFileSearchImpl && + (identical(other.maxNumResults, maxNumResults) || + other.maxNumResults == maxNumResults) && + (identical(other.rankingOptions, rankingOptions) || + other.rankingOptions == rankingOptions)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, value); + int get hashCode => Object.hash(runtimeType, maxNumResults, rankingOptions); - @JsonKey(ignore: true) + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionUserMessageContentStringImplCopyWith< - _$ChatCompletionUserMessageContentStringImpl> - get copyWith => - __$$ChatCompletionUserMessageContentStringImplCopyWithImpl< - _$ChatCompletionUserMessageContentStringImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function(List value) - parts, - required TResult Function(String value) string, - }) { - return string(value); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(List value)? parts, - TResult? Function(String value)? string, - }) { - return string?.call(value); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(List value)? parts, - TResult Function(String value)? string, - required TResult orElse(), - }) { - if (string != null) { - return string(value); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(ChatCompletionMessageContentParts value) parts, - required TResult Function(ChatCompletionUserMessageContentString value) - string, - }) { - return string(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentParts value)? parts, - TResult? Function(ChatCompletionUserMessageContentString value)? string, - }) { - return string?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(ChatCompletionMessageContentParts value)? parts, - TResult Function(ChatCompletionUserMessageContentString value)? 
string, - required TResult orElse(), - }) { - if (string != null) { - return string(this); - } - return orElse(); - } + _$$AssistantToolsFileSearchFileSearchImplCopyWith< + _$AssistantToolsFileSearchFileSearchImpl> + get copyWith => __$$AssistantToolsFileSearchFileSearchImplCopyWithImpl< + _$AssistantToolsFileSearchFileSearchImpl>(this, _$identity); @override Map toJson() { - return _$$ChatCompletionUserMessageContentStringImplToJson( + return _$$AssistantToolsFileSearchFileSearchImplToJson( this, ); } } -abstract class ChatCompletionUserMessageContentString - extends ChatCompletionUserMessageContent { - const factory ChatCompletionUserMessageContentString(final String value) = - _$ChatCompletionUserMessageContentStringImpl; - const ChatCompletionUserMessageContentString._() : super._(); +abstract class _AssistantToolsFileSearchFileSearch + extends AssistantToolsFileSearchFileSearch { + const factory _AssistantToolsFileSearchFileSearch( + {@JsonKey(name: 'max_num_results', includeIfNull: false) + final int? maxNumResults, + @JsonKey(name: 'ranking_options', includeIfNull: false) + final FileSearchRankingOptions? rankingOptions}) = + _$AssistantToolsFileSearchFileSearchImpl; + const _AssistantToolsFileSearchFileSearch._() : super._(); - factory ChatCompletionUserMessageContentString.fromJson( + factory _AssistantToolsFileSearchFileSearch.fromJson( Map json) = - _$ChatCompletionUserMessageContentStringImpl.fromJson; + _$AssistantToolsFileSearchFileSearchImpl.fromJson; + /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models + /// and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + /// + /// Note that the file search tool may output fewer than `max_num_results` results. See the + /// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. @override - String get value; - @JsonKey(ignore: true) - _$$ChatCompletionUserMessageContentStringImplCopyWith< - _$ChatCompletionUserMessageContentStringImpl> + @JsonKey(name: 'max_num_results', includeIfNull: false) + int? get maxNumResults; + + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + /// a score_threshold of 0. + /// + /// See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + /// for more information. + @override + @JsonKey(name: 'ranking_options', includeIfNull: false) + FileSearchRankingOptions? get rankingOptions; + + /// Create a copy of AssistantToolsFileSearchFileSearch + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AssistantToolsFileSearchFileSearchImplCopyWith< + _$AssistantToolsFileSearchFileSearchImpl> get copyWith => throw _privateConstructorUsedError; } -ChatCompletionMessageContentPart _$ChatCompletionMessageContentPartFromJson( - Map json) { +MessageContent _$MessageContentFromJson(Map json) { switch (json['type']) { - case 'text': - return ChatCompletionMessageContentPartText.fromJson(json); + case 'image_file': + return MessageContentImageFileObject.fromJson(json); case 'image_url': - return ChatCompletionMessageContentPartImage.fromJson(json); + return MessageContentImageUrlObject.fromJson(json); + case 'text': + return MessageContentTextObject.fromJson(json); + case 'refusal': + return MessageContentRefusalObject.fromJson(json); default: - throw CheckedFromJsonException( - json, - 'type', - 'ChatCompletionMessageContentPart', + throw CheckedFromJsonException(json, 'type', 'MessageContent', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$ChatCompletionMessageContentPart { - /// The type of the content part, in this case `text`. - ChatCompletionMessageContentPartType get type => - throw _privateConstructorUsedError; +mixin _$MessageContent { + /// Always `image_file`. + String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? 
refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageContent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ChatCompletionMessageContentPartCopyWith - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $MessageContentCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $ChatCompletionMessageContentPartCopyWith<$Res> { - factory $ChatCompletionMessageContentPartCopyWith( - ChatCompletionMessageContentPart value, - $Res Function(ChatCompletionMessageContentPart) then) = - _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - ChatCompletionMessageContentPart>; +abstract class $MessageContentCopyWith<$Res> { + factory $MessageContentCopyWith( + MessageContent value, $Res Function(MessageContent) then) = + _$MessageContentCopyWithImpl<$Res, MessageContent>; @useResult - $Res call({ChatCompletionMessageContentPartType type}); + $Res call({String type}); } /// @nodoc -class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - $Val extends ChatCompletionMessageContentPart> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - _$ChatCompletionMessageContentPartCopyWithImpl(this._value, this._then); +class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> + implements $MessageContentCopyWith<$Res> { + _$MessageContentCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -52158,137 +59916,165 @@ class _$ChatCompletionMessageContentPartCopyWithImpl<$Res, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, + as String, ) as $Val); } } /// @nodoc -abstract class _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartTextImplCopyWith( - _$ChatCompletionMessageContentPartTextImpl value, - $Res Function(_$ChatCompletionMessageContentPartTextImpl) then) = - __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res>; +abstract class _$$MessageContentImageFileObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentImageFileObjectImplCopyWith( + _$MessageContentImageFileObjectImpl value, + $Res Function(_$MessageContentImageFileObjectImpl) then) = + __$$MessageContentImageFileObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({ChatCompletionMessageContentPartType type, String text}); + $Res call( + {String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile}); + + $MessageContentImageFileCopyWith<$Res> get imageFile; } /// @nodoc -class __$$ChatCompletionMessageContentPartTextImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartTextImpl> - implements _$$ChatCompletionMessageContentPartTextImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartTextImplCopyWithImpl( - _$ChatCompletionMessageContentPartTextImpl _value, - $Res Function(_$ChatCompletionMessageContentPartTextImpl) _then) +class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentImageFileObjectImpl> + implements _$$MessageContentImageFileObjectImplCopyWith<$Res> { + __$$MessageContentImageFileObjectImplCopyWithImpl( + _$MessageContentImageFileObjectImpl _value, + $Res Function(_$MessageContentImageFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? text = null, + Object? imageFile = null, }) { - return _then(_$ChatCompletionMessageContentPartTextImpl( + return _then(_$MessageContentImageFileObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, - text: null == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable as String, + imageFile: null == imageFile + ? _value.imageFile + : imageFile // ignore: cast_nullable_to_non_nullable + as MessageContentImageFile, )); } + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $MessageContentImageFileCopyWith<$Res> get imageFile { + return $MessageContentImageFileCopyWith<$Res>(_value.imageFile, (value) { + return _then(_value.copyWith(imageFile: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$ChatCompletionMessageContentPartTextImpl - extends ChatCompletionMessageContentPartText { - const _$ChatCompletionMessageContentPartTextImpl( - {this.type = ChatCompletionMessageContentPartType.text, - required this.text}) +class _$MessageContentImageFileObjectImpl + extends MessageContentImageFileObject { + const _$MessageContentImageFileObjectImpl( + {this.type = 'image_file', + @JsonKey(name: 'image_file') required this.imageFile}) : super._(); - factory _$ChatCompletionMessageContentPartTextImpl.fromJson( + factory _$MessageContentImageFileObjectImpl.fromJson( Map json) => - _$$ChatCompletionMessageContentPartTextImplFromJson(json); + _$$MessageContentImageFileObjectImplFromJson(json); - /// The type of the content part, in this case `text`. + /// Always `image_file`. @override @JsonKey() - final ChatCompletionMessageContentPartType type; + final String type; - /// The text content. + /// The image file that is part of a message. @override - final String text; + @JsonKey(name: 'image_file') + final MessageContentImageFile imageFile; @override String toString() { - return 'ChatCompletionMessageContentPart.text(type: $type, text: $text)'; + return 'MessageContent.imageFile(type: $type, imageFile: $imageFile)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartTextImpl && + other is _$MessageContentImageFileObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); + (identical(other.imageFile, imageFile) || + other.imageFile == imageFile)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, text); + int get hashCode => Object.hash(runtimeType, type, imageFile); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartTextImplCopyWith< - _$ChatCompletionMessageContentPartTextImpl> - get copyWith => __$$ChatCompletionMessageContentPartTextImplCopyWithImpl< - _$ChatCompletionMessageContentPartTextImpl>(this, _$identity); + _$$MessageContentImageFileObjectImplCopyWith< + _$MessageContentImageFileObjectImpl> + get copyWith => __$$MessageContentImageFileObjectImplCopyWithImpl< + _$MessageContentImageFileObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { - return text(type, this.text); + return imageFile(type, this.imageFile); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { - return text?.call(type, this.text); + return imageFile?.call(type, this.imageFile); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) { - if (text != null) { - return text(type, this.text); + if (imageFile != null) { + return imageFile(type, this.imageFile); } return orElse(); } @@ -52296,117 +60082,128 @@ class _$ChatCompletionMessageContentPartTextImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { - return text(this); + return imageFile(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? 
Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { - return text?.call(this); + return imageFile?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { - if (text != null) { - return text(this); + if (imageFile != null) { + return imageFile(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionMessageContentPartTextImplToJson( + return _$$MessageContentImageFileObjectImplToJson( this, ); } } -abstract class ChatCompletionMessageContentPartText - extends ChatCompletionMessageContentPart { - const factory ChatCompletionMessageContentPartText( - {final ChatCompletionMessageContentPartType type, - required final String text}) = _$ChatCompletionMessageContentPartTextImpl; - const ChatCompletionMessageContentPartText._() : super._(); +abstract class MessageContentImageFileObject extends MessageContent { + const factory MessageContentImageFileObject( + {final String type, + @JsonKey(name: 'image_file') + required final MessageContentImageFile imageFile}) = + _$MessageContentImageFileObjectImpl; + const MessageContentImageFileObject._() : super._(); - factory ChatCompletionMessageContentPartText.fromJson( - Map json) = - _$ChatCompletionMessageContentPartTextImpl.fromJson; + factory MessageContentImageFileObject.fromJson(Map json) = + _$MessageContentImageFileObjectImpl.fromJson; + /// Always `image_file`. @override + String get type; - /// The type of the content part, in this case `text`. - ChatCompletionMessageContentPartType get type; + /// The image file that is part of a message. + @JsonKey(name: 'image_file') + MessageContentImageFile get imageFile; - /// The text content. - String get text; + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$ChatCompletionMessageContentPartTextImplCopyWith< - _$ChatCompletionMessageContentPartTextImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentImageFileObjectImplCopyWith< + _$MessageContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> - implements $ChatCompletionMessageContentPartCopyWith<$Res> { - factory _$$ChatCompletionMessageContentPartImageImplCopyWith( - _$ChatCompletionMessageContentPartImageImpl value, - $Res Function(_$ChatCompletionMessageContentPartImageImpl) then) = - __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res>; +abstract class _$$MessageContentImageUrlObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentImageUrlObjectImplCopyWith( + _$MessageContentImageUrlObjectImpl value, + $Res Function(_$MessageContentImageUrlObjectImpl) then) = + __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl}); + {String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl}); - $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl; + $MessageContentImageUrlCopyWith<$Res> get imageUrl; } /// @nodoc -class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageContentPartCopyWithImpl<$Res, - _$ChatCompletionMessageContentPartImageImpl> - implements _$$ChatCompletionMessageContentPartImageImplCopyWith<$Res> { - __$$ChatCompletionMessageContentPartImageImplCopyWithImpl( - _$ChatCompletionMessageContentPartImageImpl _value, - $Res Function(_$ChatCompletionMessageContentPartImageImpl) _then) +class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentImageUrlObjectImpl> + implements _$$MessageContentImageUrlObjectImplCopyWith<$Res> { + __$$MessageContentImageUrlObjectImplCopyWithImpl( + _$MessageContentImageUrlObjectImpl _value, + $Res Function(_$MessageContentImageUrlObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, Object? imageUrl = null, }) { - return _then(_$ChatCompletionMessageContentPartImageImpl( + return _then(_$MessageContentImageUrlObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageContentPartType, + as String, imageUrl: null == imageUrl ? _value.imageUrl : imageUrl // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageImageUrl, + as MessageContentImageUrl, )); } + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $ChatCompletionMessageImageUrlCopyWith<$Res> get imageUrl { - return $ChatCompletionMessageImageUrlCopyWith<$Res>(_value.imageUrl, - (value) { + $MessageContentImageUrlCopyWith<$Res> get imageUrl { + return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl, (value) { return _then(_value.copyWith(imageUrl: value)); }); } @@ -52414,91 +60211,100 @@ class __$$ChatCompletionMessageContentPartImageImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$ChatCompletionMessageContentPartImageImpl - extends ChatCompletionMessageContentPartImage { - const _$ChatCompletionMessageContentPartImageImpl( - {this.type = ChatCompletionMessageContentPartType.imageUrl, +class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { + const _$MessageContentImageUrlObjectImpl( + {this.type = 'image_url', @JsonKey(name: 'image_url') required this.imageUrl}) : super._(); - factory _$ChatCompletionMessageContentPartImageImpl.fromJson( + factory _$MessageContentImageUrlObjectImpl.fromJson( Map json) => - _$$ChatCompletionMessageContentPartImageImplFromJson(json); + _$$MessageContentImageUrlObjectImplFromJson(json); - /// The type of the content part, in this case `image_url`. + /// The type of the content part. Always `image_url`. @override @JsonKey() - final ChatCompletionMessageContentPartType type; + final String type; - /// The URL of the image. + /// The image URL part of a message. @override @JsonKey(name: 'image_url') - final ChatCompletionMessageImageUrl imageUrl; + final MessageContentImageUrl imageUrl; @override String toString() { - return 'ChatCompletionMessageContentPart.image(type: $type, imageUrl: $imageUrl)'; + return 'MessageContent.imageUrl(type: $type, imageUrl: $imageUrl)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageContentPartImageImpl && + other is _$MessageContentImageUrlObjectImpl && (identical(other.type, type) || other.type == type) && (identical(other.imageUrl, imageUrl) || other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionMessageContentPartImageImplCopyWith< - _$ChatCompletionMessageContentPartImageImpl> - get copyWith => __$$ChatCompletionMessageContentPartImageImplCopyWithImpl< - _$ChatCompletionMessageContentPartImageImpl>(this, _$identity); + _$$MessageContentImageUrlObjectImplCopyWith< + _$MessageContentImageUrlObjectImpl> + get copyWith => __$$MessageContentImageUrlObjectImplCopyWithImpl< + _$MessageContentImageUrlObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - ChatCompletionMessageContentPartType type, String text) - text, - required TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl) - image, + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { - return image(type, imageUrl); + return imageUrl(type, this.imageUrl); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult? Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { - return image?.call(type, imageUrl); + return imageUrl?.call(type, this.imageUrl); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(ChatCompletionMessageContentPartType type, String text)? - text, - TResult Function(ChatCompletionMessageContentPartType type, - @JsonKey(name: 'image_url') ChatCompletionMessageImageUrl imageUrl)? - image, + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), }) { - if (image != null) { - return image(type, imageUrl); + if (imageUrl != null) { + return imageUrl(type, this.imageUrl); } return orElse(); } @@ -52506,464 +60312,427 @@ class _$ChatCompletionMessageContentPartImageImpl @override @optionalTypeArgs TResult map({ - required TResult Function(ChatCompletionMessageContentPartText value) text, - required TResult Function(ChatCompletionMessageContentPartImage value) - image, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { - return image(this); + return imageUrl(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(ChatCompletionMessageContentPartText value)? text, - TResult? 
Function(ChatCompletionMessageContentPartImage value)? image, + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { - return image?.call(this); + return imageUrl?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(ChatCompletionMessageContentPartText value)? text, - TResult Function(ChatCompletionMessageContentPartImage value)? image, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { - if (image != null) { - return image(this); + if (imageUrl != null) { + return imageUrl(this); } return orElse(); } @override Map toJson() { - return _$$ChatCompletionMessageContentPartImageImplToJson( + return _$$MessageContentImageUrlObjectImplToJson( this, ); } } -abstract class ChatCompletionMessageContentPartImage - extends ChatCompletionMessageContentPart { - const factory ChatCompletionMessageContentPartImage( - {final ChatCompletionMessageContentPartType type, +abstract class MessageContentImageUrlObject extends MessageContent { + const factory MessageContentImageUrlObject( + {final String type, @JsonKey(name: 'image_url') - required final ChatCompletionMessageImageUrl imageUrl}) = - _$ChatCompletionMessageContentPartImageImpl; - const ChatCompletionMessageContentPartImage._() : super._(); + required final MessageContentImageUrl imageUrl}) = + _$MessageContentImageUrlObjectImpl; + const MessageContentImageUrlObject._() : super._(); - factory ChatCompletionMessageContentPartImage.fromJson( - Map json) = - _$ChatCompletionMessageContentPartImageImpl.fromJson; + factory MessageContentImageUrlObject.fromJson(Map json) = + _$MessageContentImageUrlObjectImpl.fromJson; + /// The type of the content part. Always `image_url`. @override + String get type; - /// The type of the content part, in this case `image_url`. - ChatCompletionMessageContentPartType get type; - - /// The URL of the image. + /// The image URL part of a message. @JsonKey(name: 'image_url') - ChatCompletionMessageImageUrl get imageUrl; - @override - @JsonKey(ignore: true) - _$$ChatCompletionMessageContentPartImageImplCopyWith< - _$ChatCompletionMessageContentPartImageImpl> - get copyWith => throw _privateConstructorUsedError; -} - -ChatCompletionMessageImageUrl _$ChatCompletionMessageImageUrlFromJson( - Map json) { - return _ChatCompletionMessageImageUrl.fromJson(json); -} - -/// @nodoc -mixin _$ChatCompletionMessageImageUrl { - /// Either a URL of the image or the base64 encoded image data. - String get url => throw _privateConstructorUsedError; - - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). - ChatCompletionMessageImageDetail get detail => - throw _privateConstructorUsedError; + MessageContentImageUrl get imageUrl; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ChatCompletionMessageImageUrlCopyWith + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentImageUrlObjectImplCopyWith< + _$MessageContentImageUrlObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ChatCompletionMessageImageUrlCopyWith<$Res> { - factory $ChatCompletionMessageImageUrlCopyWith( - ChatCompletionMessageImageUrl value, - $Res Function(ChatCompletionMessageImageUrl) then) = - _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, - ChatCompletionMessageImageUrl>; +abstract class _$$MessageContentTextObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentTextObjectImplCopyWith( + _$MessageContentTextObjectImpl value, + $Res Function(_$MessageContentTextObjectImpl) then) = + __$$MessageContentTextObjectImplCopyWithImpl<$Res>; + @override @useResult - $Res call({String url, ChatCompletionMessageImageDetail detail}); + $Res call({String type, MessageContentText text}); + + $MessageContentTextCopyWith<$Res> get text; } /// @nodoc -class _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, - $Val extends ChatCompletionMessageImageUrl> - implements $ChatCompletionMessageImageUrlCopyWith<$Res> { - _$ChatCompletionMessageImageUrlCopyWithImpl(this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; +class __$$MessageContentTextObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextObjectImpl> + implements _$$MessageContentTextObjectImplCopyWith<$Res> { + __$$MessageContentTextObjectImplCopyWithImpl( + _$MessageContentTextObjectImpl _value, + $Res Function(_$MessageContentTextObjectImpl) _then) + : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? url = null, - Object? detail = null, + Object? type = null, + Object? text = null, }) { - return _then(_value.copyWith( - url: null == url - ? _value.url - : url // ignore: cast_nullable_to_non_nullable + return _then(_$MessageContentTextObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable as String, - detail: null == detail - ? _value.detail - : detail // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageImageDetail, - ) as $Val); + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as MessageContentText, + )); } -} -/// @nodoc -abstract class _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> - implements $ChatCompletionMessageImageUrlCopyWith<$Res> { - factory _$$ChatCompletionMessageImageUrlImplCopyWith( - _$ChatCompletionMessageImageUrlImpl value, - $Res Function(_$ChatCompletionMessageImageUrlImpl) then) = - __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res>; + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@override - @useResult - $Res call({String url, ChatCompletionMessageImageDetail detail}); -} - -/// @nodoc -class __$$ChatCompletionMessageImageUrlImplCopyWithImpl<$Res> - extends _$ChatCompletionMessageImageUrlCopyWithImpl<$Res, - _$ChatCompletionMessageImageUrlImpl> - implements _$$ChatCompletionMessageImageUrlImplCopyWith<$Res> { - __$$ChatCompletionMessageImageUrlImplCopyWithImpl( - _$ChatCompletionMessageImageUrlImpl _value, - $Res Function(_$ChatCompletionMessageImageUrlImpl) _then) - : super(_value, _then); - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? url = null, - Object? detail = null, - }) { - return _then(_$ChatCompletionMessageImageUrlImpl( - url: null == url - ? _value.url - : url // ignore: cast_nullable_to_non_nullable - as String, - detail: null == detail - ? _value.detail - : detail // ignore: cast_nullable_to_non_nullable - as ChatCompletionMessageImageDetail, - )); + $MessageContentTextCopyWith<$Res> get text { + return $MessageContentTextCopyWith<$Res>(_value.text, (value) { + return _then(_value.copyWith(text: value)); + }); } } /// @nodoc @JsonSerializable() -class _$ChatCompletionMessageImageUrlImpl - extends _ChatCompletionMessageImageUrl { - const _$ChatCompletionMessageImageUrlImpl( - {required this.url, this.detail = ChatCompletionMessageImageDetail.auto}) +class _$MessageContentTextObjectImpl extends MessageContentTextObject { + const _$MessageContentTextObjectImpl({this.type = 'text', required this.text}) : super._(); - factory _$ChatCompletionMessageImageUrlImpl.fromJson( - Map json) => - _$$ChatCompletionMessageImageUrlImplFromJson(json); + factory _$MessageContentTextObjectImpl.fromJson(Map json) => + _$$MessageContentTextObjectImplFromJson(json); - /// Either a URL of the image or the base64 encoded image data. + /// Always `text`. @override - final String url; + @JsonKey() + final String type; - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + /// The text content that is part of a message. @override - @JsonKey() - final ChatCompletionMessageImageDetail detail; + final MessageContentText text; @override String toString() { - return 'ChatCompletionMessageImageUrl(url: $url, detail: $detail)'; + return 'MessageContent.text(type: $type, text: $text)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ChatCompletionMessageImageUrlImpl && - (identical(other.url, url) || other.url == url) && - (identical(other.detail, detail) || other.detail == detail)); + other is _$MessageContentTextObjectImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, url, detail); + int get hashCode => Object.hash(runtimeType, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$ChatCompletionMessageImageUrlImplCopyWith< - _$ChatCompletionMessageImageUrlImpl> - get copyWith => __$$ChatCompletionMessageImageUrlImplCopyWithImpl< - _$ChatCompletionMessageImageUrlImpl>(this, _$identity); + _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> + get copyWith => __$$MessageContentTextObjectImplCopyWithImpl< + _$MessageContentTextObjectImpl>(this, _$identity); @override - Map toJson() { - return _$$ChatCompletionMessageImageUrlImplToJson( - this, - ); + @optionalTypeArgs + TResult when({ + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, + }) { + return text(type, this.text); } -} - -abstract class _ChatCompletionMessageImageUrl - extends ChatCompletionMessageImageUrl { - const factory _ChatCompletionMessageImageUrl( - {required final String url, - final ChatCompletionMessageImageDetail detail}) = - _$ChatCompletionMessageImageUrlImpl; - const _ChatCompletionMessageImageUrl._() : super._(); - factory _ChatCompletionMessageImageUrl.fromJson(Map json) = - _$ChatCompletionMessageImageUrlImpl.fromJson; - - @override - - /// Either a URL of the image or the base64 encoded image data. - String get url; - @override - - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). - ChatCompletionMessageImageDetail get detail; @override - @JsonKey(ignore: true) - _$$ChatCompletionMessageImageUrlImplCopyWith< - _$ChatCompletionMessageImageUrlImpl> - get copyWith => throw _privateConstructorUsedError; -} - -AssistantTools _$AssistantToolsFromJson(Map json) { - switch (json['type']) { - case 'code_interpreter': - return AssistantToolsCodeInterpreter.fromJson(json); - case 'file_search': - return AssistantToolsFileSearch.fromJson(json); - case 'function': - return AssistantToolsFunction.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'AssistantTools', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$AssistantTools { - /// The type of tool being defined: `code_interpreter` - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, - required TResult Function(String type, FunctionObject function) function, - }) => - throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, - TResult? Function(String type, FunctionObject function)? function, - }) => - throw _privateConstructorUsedError; + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? 
refusal, + }) { + return text?.call(type, this.text); + } + + @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, - TResult Function(String type, FunctionObject function)? function, + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? refusal, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (text != null) { + return text(type, this.text); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, - }) => - throw _privateConstructorUsedError; + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, + }) { + return text(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, - }) => - throw _privateConstructorUsedError; + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, + }) { + return text?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? 
refusal, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $AssistantToolsCopyWith get copyWith => - throw _privateConstructorUsedError; -} + }) { + if (text != null) { + return text(this); + } + return orElse(); + } -/// @nodoc -abstract class $AssistantToolsCopyWith<$Res> { - factory $AssistantToolsCopyWith( - AssistantTools value, $Res Function(AssistantTools) then) = - _$AssistantToolsCopyWithImpl<$Res, AssistantTools>; - @useResult - $Res call({String type}); + @override + Map toJson() { + return _$$MessageContentTextObjectImplToJson( + this, + ); + } } -/// @nodoc -class _$AssistantToolsCopyWithImpl<$Res, $Val extends AssistantTools> - implements $AssistantToolsCopyWith<$Res> { - _$AssistantToolsCopyWithImpl(this._value, this._then); +abstract class MessageContentTextObject extends MessageContent { + const factory MessageContentTextObject( + {final String type, + required final MessageContentText text}) = _$MessageContentTextObjectImpl; + const MessageContentTextObject._() : super._(); - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + factory MessageContentTextObject.fromJson(Map json) = + _$MessageContentTextObjectImpl.fromJson; - @pragma('vm:prefer-inline') + /// Always `text`. @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + String get type; + + /// The text content that is part of a message. + MessageContentText get text; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> + get copyWith => throw _privateConstructorUsedError; } -/// @nodoc -abstract class _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> - implements $AssistantToolsCopyWith<$Res> { - factory _$$AssistantToolsCodeInterpreterImplCopyWith( - _$AssistantToolsCodeInterpreterImpl value, - $Res Function(_$AssistantToolsCodeInterpreterImpl) then) = - __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res>; +/// @nodoc +abstract class _$$MessageContentRefusalObjectImplCopyWith<$Res> + implements $MessageContentCopyWith<$Res> { + factory _$$MessageContentRefusalObjectImplCopyWith( + _$MessageContentRefusalObjectImpl value, + $Res Function(_$MessageContentRefusalObjectImpl) then) = + __$$MessageContentRefusalObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type}); + $Res call({String type, String refusal}); } /// @nodoc -class __$$AssistantToolsCodeInterpreterImplCopyWithImpl<$Res> - extends _$AssistantToolsCopyWithImpl<$Res, - _$AssistantToolsCodeInterpreterImpl> - implements _$$AssistantToolsCodeInterpreterImplCopyWith<$Res> { - __$$AssistantToolsCodeInterpreterImplCopyWithImpl( - _$AssistantToolsCodeInterpreterImpl _value, - $Res Function(_$AssistantToolsCodeInterpreterImpl) _then) +class __$$MessageContentRefusalObjectImplCopyWithImpl<$Res> + extends _$MessageContentCopyWithImpl<$Res, + _$MessageContentRefusalObjectImpl> + implements _$$MessageContentRefusalObjectImplCopyWith<$Res> { + __$$MessageContentRefusalObjectImplCopyWithImpl( + _$MessageContentRefusalObjectImpl _value, + $Res Function(_$MessageContentRefusalObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, + Object? refusal = null, }) { - return _then(_$AssistantToolsCodeInterpreterImpl( + return _then(_$MessageContentRefusalObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, + refusal: null == refusal + ? _value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String, )); } } /// @nodoc @JsonSerializable() -class _$AssistantToolsCodeInterpreterImpl - extends AssistantToolsCodeInterpreter { - const _$AssistantToolsCodeInterpreterImpl({this.type = 'code_interpreter'}) +class _$MessageContentRefusalObjectImpl extends MessageContentRefusalObject { + const _$MessageContentRefusalObjectImpl( + {required this.type, required this.refusal}) : super._(); - factory _$AssistantToolsCodeInterpreterImpl.fromJson( + factory _$MessageContentRefusalObjectImpl.fromJson( Map json) => - _$$AssistantToolsCodeInterpreterImplFromJson(json); + _$$MessageContentRefusalObjectImplFromJson(json); - /// The type of tool being defined: `code_interpreter` + /// Always `refusal`. 
@override - @JsonKey() final String type; + /// No Description + @override + final String refusal; + @override String toString() { - return 'AssistantTools.codeInterpreter(type: $type)'; + return 'MessageContent.refusal(type: $type, refusal: $refusal)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantToolsCodeInterpreterImpl && - (identical(other.type, type) || other.type == type)); + other is _$MessageContentRefusalObjectImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.refusal, refusal) || other.refusal == refusal)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, type, refusal); - @JsonKey(ignore: true) + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$AssistantToolsCodeInterpreterImplCopyWith< - _$AssistantToolsCodeInterpreterImpl> - get copyWith => __$$AssistantToolsCodeInterpreterImplCopyWithImpl< - _$AssistantToolsCodeInterpreterImpl>(this, _$identity); + _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl> + get copyWith => __$$MessageContentRefusalObjectImplCopyWithImpl< + _$MessageContentRefusalObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, - required TResult Function(String type, FunctionObject function) function, + required TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + imageFile, + required TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + imageUrl, + required TResult Function(String type, MessageContentText text) text, + required TResult Function(String type, String refusal) refusal, }) { - return codeInterpreter(type); + return refusal(type, this.refusal); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, - TResult? Function(String type, FunctionObject function)? function, + TResult? Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult? Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult? Function(String type, MessageContentText text)? text, + TResult? Function(String type, String refusal)? refusal, }) { - return codeInterpreter?.call(type); + return refusal?.call(type, this.refusal); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, - TResult Function(String type, FunctionObject function)? function, + TResult Function(String type, + @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + imageFile, + TResult Function(String type, + @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + imageUrl, + TResult Function(String type, MessageContentText text)? text, + TResult Function(String type, String refusal)? 
refusal, required TResult orElse(), }) { - if (codeInterpreter != null) { - return codeInterpreter(type); + if (refusal != null) { + return refusal(type, this.refusal); } return orElse(); } @@ -52971,167 +60740,440 @@ class _$AssistantToolsCodeInterpreterImpl @override @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, + required TResult Function(MessageContentImageFileObject value) imageFile, + required TResult Function(MessageContentImageUrlObject value) imageUrl, + required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageContentRefusalObject value) refusal, }) { - return codeInterpreter(this); + return refusal(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, + TResult? Function(MessageContentImageFileObject value)? imageFile, + TResult? Function(MessageContentImageUrlObject value)? imageUrl, + TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageContentRefusalObject value)? refusal, }) { - return codeInterpreter?.call(this); + return refusal?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(MessageContentImageFileObject value)? imageFile, + TResult Function(MessageContentImageUrlObject value)? imageUrl, + TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentRefusalObject value)? refusal, required TResult orElse(), }) { - if (codeInterpreter != null) { - return codeInterpreter(this); + if (refusal != null) { + return refusal(this); } return orElse(); } @override Map toJson() { - return _$$AssistantToolsCodeInterpreterImplToJson( + return _$$MessageContentRefusalObjectImplToJson( this, ); } } -abstract class AssistantToolsCodeInterpreter extends AssistantTools { - const factory AssistantToolsCodeInterpreter({final String type}) = - _$AssistantToolsCodeInterpreterImpl; - const AssistantToolsCodeInterpreter._() : super._(); +abstract class MessageContentRefusalObject extends MessageContent { + const factory MessageContentRefusalObject( + {required final String type, + required final String refusal}) = _$MessageContentRefusalObjectImpl; + const MessageContentRefusalObject._() : super._(); - factory AssistantToolsCodeInterpreter.fromJson(Map json) = - _$AssistantToolsCodeInterpreterImpl.fromJson; + factory MessageContentRefusalObject.fromJson(Map json) = + _$MessageContentRefusalObjectImpl.fromJson; + /// Always `refusal`. @override - - /// The type of tool being defined: `code_interpreter` String get type; + + /// No Description + String get refusal; + + /// Create a copy of MessageContent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$AssistantToolsCodeInterpreterImplCopyWith< - _$AssistantToolsCodeInterpreterImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentRefusalObjectImplCopyWith<_$MessageContentRefusalObjectImpl> get copyWith => throw _privateConstructorUsedError; } +MessageDeltaContent _$MessageDeltaContentFromJson(Map json) { + switch (json['type']) { + case 'image_file': + return MessageDeltaContentImageFileObject.fromJson(json); + case 'text': + return MessageDeltaContentTextObject.fromJson(json); + case 'refusal': + return MessageDeltaContentRefusalObject.fromJson(json); + case 'image_url': + return MessageDeltaContentImageUrlObject.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'type', 'MessageDeltaContent', + 'Invalid union type "${json['type']}"!'); + } +} + /// @nodoc -abstract class _$$AssistantToolsFileSearchImplCopyWith<$Res> - implements $AssistantToolsCopyWith<$Res> { - factory _$$AssistantToolsFileSearchImplCopyWith( - _$AssistantToolsFileSearchImpl value, - $Res Function(_$AssistantToolsFileSearchImpl) then) = - __$$AssistantToolsFileSearchImplCopyWithImpl<$Res>; +mixin _$MessageDeltaContent { + /// The index of the content part in the message. + int get index => throw _privateConstructorUsedError; + + /// Always `image_file`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? 
+ imageUrl, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this MessageDeltaContent to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $MessageDeltaContentCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageDeltaContentCopyWith<$Res> { + factory $MessageDeltaContentCopyWith( + MessageDeltaContent value, $Res Function(MessageDeltaContent) then) = + _$MessageDeltaContentCopyWithImpl<$Res, MessageDeltaContent>; + @useResult + $Res call({int index, String type}); +} + +/// @nodoc +class _$MessageDeltaContentCopyWithImpl<$Res, $Val extends MessageDeltaContent> + implements $MessageDeltaContentCopyWith<$Res> { + _$MessageDeltaContentCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? type = null, + }) { + return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentImageFileObjectImplCopyWith( + _$MessageDeltaContentImageFileObjectImpl value, + $Res Function(_$MessageDeltaContentImageFileObjectImpl) then) = + __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type}); + $Res call( + {int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile}); + + $MessageContentImageFileCopyWith<$Res>? 
get imageFile; } /// @nodoc -class __$$AssistantToolsFileSearchImplCopyWithImpl<$Res> - extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFileSearchImpl> - implements _$$AssistantToolsFileSearchImplCopyWith<$Res> { - __$$AssistantToolsFileSearchImplCopyWithImpl( - _$AssistantToolsFileSearchImpl _value, - $Res Function(_$AssistantToolsFileSearchImpl) _then) +class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentImageFileObjectImpl> + implements _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> { + __$$MessageDeltaContentImageFileObjectImplCopyWithImpl( + _$MessageDeltaContentImageFileObjectImpl _value, + $Res Function(_$MessageDeltaContentImageFileObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, + Object? imageFile = freezed, }) { - return _then(_$AssistantToolsFileSearchImpl( + return _then(_$MessageDeltaContentImageFileObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, + imageFile: freezed == imageFile + ? _value.imageFile + : imageFile // ignore: cast_nullable_to_non_nullable + as MessageContentImageFile?, )); } + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $MessageContentImageFileCopyWith<$Res>? get imageFile { + if (_value.imageFile == null) { + return null; + } + + return $MessageContentImageFileCopyWith<$Res>(_value.imageFile!, (value) { + return _then(_value.copyWith(imageFile: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { - const _$AssistantToolsFileSearchImpl({this.type = 'file_search'}) : super._(); +class _$MessageDeltaContentImageFileObjectImpl + extends MessageDeltaContentImageFileObject { + const _$MessageDeltaContentImageFileObjectImpl( + {required this.index, + required this.type, + @JsonKey(name: 'image_file', includeIfNull: false) this.imageFile}) + : super._(); - factory _$AssistantToolsFileSearchImpl.fromJson(Map json) => - _$$AssistantToolsFileSearchImplFromJson(json); + factory _$MessageDeltaContentImageFileObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentImageFileObjectImplFromJson(json); - /// The type of tool being defined: `file_search` + /// The index of the content part in the message. + @override + final int index; + + /// Always `image_file`. @override - @JsonKey() final String type; + /// The image file that is part of a message. + @override + @JsonKey(name: 'image_file', includeIfNull: false) + final MessageContentImageFile? 
imageFile; + @override String toString() { - return 'AssistantTools.fileSearch(type: $type)'; + return 'MessageDeltaContent.imageFile(index: $index, type: $type, imageFile: $imageFile)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantToolsFileSearchImpl && - (identical(other.type, type) || other.type == type)); + other is _$MessageDeltaContentImageFileObjectImpl && + (identical(other.index, index) || other.index == index) && + (identical(other.type, type) || other.type == type) && + (identical(other.imageFile, imageFile) || + other.imageFile == imageFile)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type); + int get hashCode => Object.hash(runtimeType, index, type, imageFile); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> - get copyWith => __$$AssistantToolsFileSearchImplCopyWithImpl< - _$AssistantToolsFileSearchImpl>(this, _$identity); + _$$MessageDeltaContentImageFileObjectImplCopyWith< + _$MessageDeltaContentImageFileObjectImpl> + get copyWith => __$$MessageDeltaContentImageFileObjectImplCopyWithImpl< + _$MessageDeltaContentImageFileObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, - required TResult Function(String type, FunctionObject function) function, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) { - return fileSearch(type); + return imageFile(index, type, this.imageFile); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, - TResult? Function(String type, FunctionObject function)? function, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) { - return fileSearch?.call(type); + return imageFile?.call(index, type, this.imageFile); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, - TResult Function(String type, FunctionObject function)? 
function, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(type); + if (imageFile != null) { + return imageFile(index, type, this.imageFile); } return orElse(); } @@ -53139,189 +61181,281 @@ class _$AssistantToolsFileSearchImpl extends AssistantToolsFileSearch { @override @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { - return fileSearch(this); + return imageFile(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) { - return fileSearch?.call(this); + return imageFile?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(this); + if (imageFile != null) { + return imageFile(this); } return orElse(); } @override Map toJson() { - return _$$AssistantToolsFileSearchImplToJson( + return _$$MessageDeltaContentImageFileObjectImplToJson( this, ); } } -abstract class AssistantToolsFileSearch extends AssistantTools { - const factory AssistantToolsFileSearch({final String type}) = - _$AssistantToolsFileSearchImpl; - const AssistantToolsFileSearch._() : super._(); +abstract class MessageDeltaContentImageFileObject extends MessageDeltaContent { + const factory MessageDeltaContentImageFileObject( + {required final int index, + required final String type, + @JsonKey(name: 'image_file', includeIfNull: false) + final MessageContentImageFile? 
imageFile}) = + _$MessageDeltaContentImageFileObjectImpl; + const MessageDeltaContentImageFileObject._() : super._(); - factory AssistantToolsFileSearch.fromJson(Map json) = - _$AssistantToolsFileSearchImpl.fromJson; + factory MessageDeltaContentImageFileObject.fromJson( + Map json) = + _$MessageDeltaContentImageFileObjectImpl.fromJson; + /// The index of the content part in the message. @override + int get index; - /// The type of tool being defined: `file_search` + /// Always `image_file`. + @override String get type; + + /// The image file that is part of a message. + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? get imageFile; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$AssistantToolsFileSearchImplCopyWith<_$AssistantToolsFileSearchImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentImageFileObjectImplCopyWith< + _$MessageDeltaContentImageFileObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$AssistantToolsFunctionImplCopyWith<$Res> - implements $AssistantToolsCopyWith<$Res> { - factory _$$AssistantToolsFunctionImplCopyWith( - _$AssistantToolsFunctionImpl value, - $Res Function(_$AssistantToolsFunctionImpl) then) = - __$$AssistantToolsFunctionImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentTextObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentTextObjectImplCopyWith( + _$MessageDeltaContentTextObjectImpl value, + $Res Function(_$MessageDeltaContentTextObjectImpl) then) = + __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, FunctionObject function}); + $Res call( + {int index, + String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text}); - $FunctionObjectCopyWith<$Res> get function; + $MessageDeltaContentTextCopyWith<$Res>? get text; } /// @nodoc -class __$$AssistantToolsFunctionImplCopyWithImpl<$Res> - extends _$AssistantToolsCopyWithImpl<$Res, _$AssistantToolsFunctionImpl> - implements _$$AssistantToolsFunctionImplCopyWith<$Res> { - __$$AssistantToolsFunctionImplCopyWithImpl( - _$AssistantToolsFunctionImpl _value, - $Res Function(_$AssistantToolsFunctionImpl) _then) +class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentTextObjectImpl> + implements _$$MessageDeltaContentTextObjectImplCopyWith<$Res> { + __$$MessageDeltaContentTextObjectImplCopyWithImpl( + _$MessageDeltaContentTextObjectImpl _value, + $Res Function(_$MessageDeltaContentTextObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? function = null, + Object? text = freezed, }) { - return _then(_$AssistantToolsFunctionImpl( + return _then(_$MessageDeltaContentTextObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - function: null == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as FunctionObject, + text: freezed == text + ? 
_value.text + : text // ignore: cast_nullable_to_non_nullable + as MessageDeltaContentText?, )); } + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $FunctionObjectCopyWith<$Res> get function { - return $FunctionObjectCopyWith<$Res>(_value.function, (value) { - return _then(_value.copyWith(function: value)); + $MessageDeltaContentTextCopyWith<$Res>? get text { + if (_value.text == null) { + return null; + } + + return $MessageDeltaContentTextCopyWith<$Res>(_value.text!, (value) { + return _then(_value.copyWith(text: value)); }); } } /// @nodoc @JsonSerializable() -class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { - const _$AssistantToolsFunctionImpl( - {this.type = 'function', required this.function}) +class _$MessageDeltaContentTextObjectImpl + extends MessageDeltaContentTextObject { + const _$MessageDeltaContentTextObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.text}) : super._(); - factory _$AssistantToolsFunctionImpl.fromJson(Map json) => - _$$AssistantToolsFunctionImplFromJson(json); + factory _$MessageDeltaContentTextObjectImpl.fromJson( + Map json) => + _$$MessageDeltaContentTextObjectImplFromJson(json); - /// The type of tool being defined: `function` + /// The index of the content part in the message. + @override + final int index; + + /// Always `text`. @override - @JsonKey() final String type; - /// A function that the model may call. + /// The text content that is part of a message. @override - final FunctionObject function; + @JsonKey(includeIfNull: false) + final MessageDeltaContentText? text; @override String toString() { - return 'AssistantTools.function(type: $type, function: $function)'; + return 'MessageDeltaContent.text(index: $index, type: $type, text: $text)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$AssistantToolsFunctionImpl && + other is _$MessageDeltaContentTextObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && - (identical(other.function, function) || - other.function == function)); + (identical(other.text, text) || other.text == text)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, function); + int get hashCode => Object.hash(runtimeType, index, type, text); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> - get copyWith => __$$AssistantToolsFunctionImplCopyWithImpl< - _$AssistantToolsFunctionImpl>(this, _$identity); + _$$MessageDeltaContentTextObjectImplCopyWith< + _$MessageDeltaContentTextObjectImpl> + get copyWith => __$$MessageDeltaContentTextObjectImplCopyWithImpl< + _$MessageDeltaContentTextObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type) codeInterpreter, - required TResult Function(String type) fileSearch, - required TResult Function(String type, FunctionObject function) function, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) + imageFile, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) + imageUrl, }) { - return function(type, this.function); + return text(index, type, this.text); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type)? codeInterpreter, - TResult? Function(String type)? fileSearch, - TResult? Function(String type, FunctionObject function)? function, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? + imageUrl, }) { - return function?.call(type, this.function); + return text?.call(index, type, this.text); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type)? codeInterpreter, - TResult Function(String type)? fileSearch, - TResult Function(String type, FunctionObject function)? function, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? + imageFile, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? 
+ imageUrl, required TResult orElse(), }) { - if (function != null) { - return function(type, this.function); + if (text != null) { + return text(index, type, this.text); } return orElse(); } @@ -53329,327 +61463,261 @@ class _$AssistantToolsFunctionImpl extends AssistantToolsFunction { @override @optionalTypeArgs TResult map({ - required TResult Function(AssistantToolsCodeInterpreter value) - codeInterpreter, - required TResult Function(AssistantToolsFileSearch value) fileSearch, - required TResult Function(AssistantToolsFunction value) function, + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { - return function(this); + return text(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult? Function(AssistantToolsFileSearch value)? fileSearch, - TResult? Function(AssistantToolsFunction value)? function, + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) { - return function?.call(this); + return text?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(AssistantToolsCodeInterpreter value)? codeInterpreter, - TResult Function(AssistantToolsFileSearch value)? fileSearch, - TResult Function(AssistantToolsFunction value)? function, + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) { - if (function != null) { - return function(this); + if (text != null) { + return text(this); } return orElse(); } @override Map toJson() { - return _$$AssistantToolsFunctionImplToJson( + return _$$MessageDeltaContentTextObjectImplToJson( this, ); } } -abstract class AssistantToolsFunction extends AssistantTools { - const factory AssistantToolsFunction( - {final String type, - required final FunctionObject function}) = _$AssistantToolsFunctionImpl; - const AssistantToolsFunction._() : super._(); +abstract class MessageDeltaContentTextObject extends MessageDeltaContent { + const factory MessageDeltaContentTextObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final MessageDeltaContentText? text}) = + _$MessageDeltaContentTextObjectImpl; + const MessageDeltaContentTextObject._() : super._(); - factory AssistantToolsFunction.fromJson(Map json) = - _$AssistantToolsFunctionImpl.fromJson; + factory MessageDeltaContentTextObject.fromJson(Map json) = + _$MessageDeltaContentTextObjectImpl.fromJson; + /// The index of the content part in the message. @override + int get index; - /// The type of tool being defined: `function` - String get type; - - /// A function that the model may call. - FunctionObject get function; + /// Always `text`. 
@override - @JsonKey(ignore: true) - _$$AssistantToolsFunctionImplCopyWith<_$AssistantToolsFunctionImpl> - get copyWith => throw _privateConstructorUsedError; -} - -MessageContent _$MessageContentFromJson(Map json) { - switch (json['type']) { - case 'image_file': - return MessageContentImageFileObject.fromJson(json); - case 'image_url': - return MessageContentImageUrlObject.fromJson(json); - case 'text': - return MessageContentTextObject.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'MessageContent', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$MessageContent { - /// Always `image_file`. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageContentCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $MessageContentCopyWith<$Res> { - factory $MessageContentCopyWith( - MessageContent value, $Res Function(MessageContent) then) = - _$MessageContentCopyWithImpl<$Res, MessageContent>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$MessageContentCopyWithImpl<$Res, $Val extends MessageContent> - implements $MessageContentCopyWith<$Res> { - _$MessageContentCopyWithImpl(this._value, this._then); + String get type; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// The text content that is part of a message. 
+ @JsonKey(includeIfNull: false) + MessageDeltaContentText? get text; - @pragma('vm:prefer-inline') + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentTextObjectImplCopyWith< + _$MessageDeltaContentTextObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentImageFileObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentImageFileObjectImplCopyWith( - _$MessageContentImageFileObjectImpl value, - $Res Function(_$MessageContentImageFileObjectImpl) then) = - __$$MessageContentImageFileObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentRefusalObjectImplCopyWith( + _$MessageDeltaContentRefusalObjectImpl value, + $Res Function(_$MessageDeltaContentRefusalObjectImpl) then) = + __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile}); - - $MessageContentImageFileCopyWith<$Res> get imageFile; + {int index, String type, @JsonKey(includeIfNull: false) String? refusal}); } /// @nodoc -class __$$MessageContentImageFileObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, - _$MessageContentImageFileObjectImpl> - implements _$$MessageContentImageFileObjectImplCopyWith<$Res> { - __$$MessageContentImageFileObjectImplCopyWithImpl( - _$MessageContentImageFileObjectImpl _value, - $Res Function(_$MessageContentImageFileObjectImpl) _then) +class __$$MessageDeltaContentRefusalObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentRefusalObjectImpl> + implements _$$MessageDeltaContentRefusalObjectImplCopyWith<$Res> { + __$$MessageDeltaContentRefusalObjectImplCopyWithImpl( + _$MessageDeltaContentRefusalObjectImpl _value, + $Res Function(_$MessageDeltaContentRefusalObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? imageFile = null, + Object? refusal = freezed, }) { - return _then(_$MessageContentImageFileObjectImpl( + return _then(_$MessageDeltaContentRefusalObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - imageFile: null == imageFile - ? _value.imageFile - : imageFile // ignore: cast_nullable_to_non_nullable - as MessageContentImageFile, + refusal: freezed == refusal + ? 
_value.refusal + : refusal // ignore: cast_nullable_to_non_nullable + as String?, )); } - - @override - @pragma('vm:prefer-inline') - $MessageContentImageFileCopyWith<$Res> get imageFile { - return $MessageContentImageFileCopyWith<$Res>(_value.imageFile, (value) { - return _then(_value.copyWith(imageFile: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageContentImageFileObjectImpl - extends MessageContentImageFileObject { - const _$MessageContentImageFileObjectImpl( - {this.type = 'image_file', - @JsonKey(name: 'image_file') required this.imageFile}) +class _$MessageDeltaContentRefusalObjectImpl + extends MessageDeltaContentRefusalObject { + const _$MessageDeltaContentRefusalObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.refusal}) : super._(); - factory _$MessageContentImageFileObjectImpl.fromJson( + factory _$MessageDeltaContentRefusalObjectImpl.fromJson( Map json) => - _$$MessageContentImageFileObjectImplFromJson(json); + _$$MessageDeltaContentRefusalObjectImplFromJson(json); - /// Always `image_file`. + /// The index of the refusal part in the message. + @override + final int index; + + /// Always `refusal`. @override - @JsonKey() final String type; - /// The image file that is part of a message. + /// The refusal content generated by the assistant. @override - @JsonKey(name: 'image_file') - final MessageContentImageFile imageFile; + @JsonKey(includeIfNull: false) + final String? refusal; @override String toString() { - return 'MessageContent.imageFile(type: $type, imageFile: $imageFile)'; + return 'MessageDeltaContent.refusal(index: $index, type: $type, refusal: $refusal)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentImageFileObjectImpl && + other is _$MessageDeltaContentRefusalObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && - (identical(other.imageFile, imageFile) || - other.imageFile == imageFile)); + (identical(other.refusal, refusal) || other.refusal == refusal)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, imageFile); + int get hashCode => Object.hash(runtimeType, index, type, refusal); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageContentImageFileObjectImplCopyWith< - _$MessageContentImageFileObjectImpl> - get copyWith => __$$MessageContentImageFileObjectImplCopyWithImpl< - _$MessageContentImageFileObjectImpl>(this, _$identity); + _$$MessageDeltaContentRefusalObjectImplCopyWith< + _$MessageDeltaContentRefusalObjectImpl> + get copyWith => __$$MessageDeltaContentRefusalObjectImplCopyWithImpl< + _$MessageDeltaContentRefusalObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? 
imageFile) imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) imageUrl, - required TResult Function(String type, MessageContentText text) text, }) { - return imageFile(type, this.imageFile); + return refusal(index, type, this.refusal); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? imageUrl, - TResult? Function(String type, MessageContentText text)? text, }) { - return imageFile?.call(type, this.imageFile); + return refusal?.call(index, type, this.refusal); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? imageUrl, - TResult Function(String type, MessageContentText text)? text, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(type, this.imageFile); + if (refusal != null) { + return refusal(index, type, this.refusal); } return orElse(); } @@ -53657,119 +61725,144 @@ class _$MessageContentImageFileObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { - return imageFile(this); + return refusal(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? 
Function(MessageContentTextObject value)? text, + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) { - return imageFile?.call(this); + return refusal?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(this); + if (refusal != null) { + return refusal(this); } return orElse(); } @override Map toJson() { - return _$$MessageContentImageFileObjectImplToJson( + return _$$MessageDeltaContentRefusalObjectImplToJson( this, ); } } -abstract class MessageContentImageFileObject extends MessageContent { - const factory MessageContentImageFileObject( - {final String type, - @JsonKey(name: 'image_file') - required final MessageContentImageFile imageFile}) = - _$MessageContentImageFileObjectImpl; - const MessageContentImageFileObject._() : super._(); +abstract class MessageDeltaContentRefusalObject extends MessageDeltaContent { + const factory MessageDeltaContentRefusalObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? refusal}) = + _$MessageDeltaContentRefusalObjectImpl; + const MessageDeltaContentRefusalObject._() : super._(); - factory MessageContentImageFileObject.fromJson(Map json) = - _$MessageContentImageFileObjectImpl.fromJson; + factory MessageDeltaContentRefusalObject.fromJson(Map json) = + _$MessageDeltaContentRefusalObjectImpl.fromJson; + /// The index of the refusal part in the message. @override + int get index; - /// Always `image_file`. + /// Always `refusal`. + @override String get type; - /// The image file that is part of a message. - @JsonKey(name: 'image_file') - MessageContentImageFile get imageFile; + /// The refusal content generated by the assistant. + @JsonKey(includeIfNull: false) + String? get refusal; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$MessageContentImageFileObjectImplCopyWith< - _$MessageContentImageFileObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentRefusalObjectImplCopyWith< + _$MessageDeltaContentRefusalObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentImageUrlObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentImageUrlObjectImplCopyWith( - _$MessageContentImageUrlObjectImpl value, - $Res Function(_$MessageContentImageUrlObjectImpl) then) = - __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> + implements $MessageDeltaContentCopyWith<$Res> { + factory _$$MessageDeltaContentImageUrlObjectImplCopyWith( + _$MessageDeltaContentImageUrlObjectImpl value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) then) = + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl}); + {int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl}); - $MessageContentImageUrlCopyWith<$Res> get imageUrl; + $MessageContentImageUrlCopyWith<$Res>? get imageUrl; } /// @nodoc -class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, - _$MessageContentImageUrlObjectImpl> - implements _$$MessageContentImageUrlObjectImplCopyWith<$Res> { - __$$MessageContentImageUrlObjectImplCopyWithImpl( - _$MessageContentImageUrlObjectImpl _value, - $Res Function(_$MessageContentImageUrlObjectImpl) _then) +class __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentCopyWithImpl<$Res, + _$MessageDeltaContentImageUrlObjectImpl> + implements _$$MessageDeltaContentImageUrlObjectImplCopyWith<$Res> { + __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl( + _$MessageDeltaContentImageUrlObjectImpl _value, + $Res Function(_$MessageDeltaContentImageUrlObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? imageUrl = null, + Object? imageUrl = freezed, }) { - return _then(_$MessageContentImageUrlObjectImpl( + return _then(_$MessageDeltaContentImageUrlObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - imageUrl: null == imageUrl + imageUrl: freezed == imageUrl ? _value.imageUrl : imageUrl // ignore: cast_nullable_to_non_nullable - as MessageContentImageUrl, + as MessageContentImageUrl?, )); } + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentImageUrlCopyWith<$Res> get imageUrl { - return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl, (value) { + $MessageContentImageUrlCopyWith<$Res>? 
get imageUrl { + if (_value.imageUrl == null) { + return null; + } + + return $MessageContentImageUrlCopyWith<$Res>(_value.imageUrl!, (value) { return _then(_value.copyWith(imageUrl: value)); }); } @@ -53777,95 +61870,136 @@ class __$$MessageContentImageUrlObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { - const _$MessageContentImageUrlObjectImpl( - {this.type = 'image_url', - @JsonKey(name: 'image_url') required this.imageUrl}) +class _$MessageDeltaContentImageUrlObjectImpl + extends MessageDeltaContentImageUrlObject { + const _$MessageDeltaContentImageUrlObjectImpl( + {required this.index, + required this.type, + @JsonKey(name: 'image_url', includeIfNull: false) this.imageUrl}) : super._(); - factory _$MessageContentImageUrlObjectImpl.fromJson( + factory _$MessageDeltaContentImageUrlObjectImpl.fromJson( Map json) => - _$$MessageContentImageUrlObjectImplFromJson(json); + _$$MessageDeltaContentImageUrlObjectImplFromJson(json); - /// The type of the content part. Always `image_url`. + /// The index of the content part in the message. + @override + final int index; + + /// Always `image_url`. @override - @JsonKey() final String type; /// The image URL part of a message. @override - @JsonKey(name: 'image_url') - final MessageContentImageUrl imageUrl; + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl; @override String toString() { - return 'MessageContent.imageUrl(type: $type, imageUrl: $imageUrl)'; + return 'MessageDeltaContent.imageUrl(index: $index, type: $type, imageUrl: $imageUrl)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentImageUrlObjectImpl && + other is _$MessageDeltaContentImageUrlObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && (identical(other.imageUrl, imageUrl) || other.imageUrl == imageUrl)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, imageUrl); + int get hashCode => Object.hash(runtimeType, index, type, imageUrl); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageContentImageUrlObjectImplCopyWith< - _$MessageContentImageUrlObjectImpl> - get copyWith => __$$MessageContentImageUrlObjectImplCopyWithImpl< - _$MessageContentImageUrlObjectImpl>(this, _$identity); + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => __$$MessageDeltaContentImageUrlObjectImplCopyWithImpl< + _$MessageDeltaContentImageUrlObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile) imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? 
text) + text, + required TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal) + refusal, + required TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl) imageUrl, - required TResult Function(String type, MessageContentText text) text, }) { - return imageUrl(type, this.imageUrl); + return imageUrl(index, type, this.imageUrl); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult? Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? imageUrl, - TResult? Function(String type, MessageContentText text)? text, }) { - return imageUrl?.call(type, this.imageUrl); + return imageUrl?.call(index, type, this.imageUrl); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? + TResult Function( + int index, + String type, + @JsonKey(name: 'image_file', includeIfNull: false) + MessageContentImageFile? imageFile)? imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? + text, + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? refusal)? + refusal, + TResult Function( + int index, + String type, + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? imageUrl)? imageUrl, - TResult Function(String type, MessageContentText text)? text, required TResult orElse(), }) { if (imageUrl != null) { - return imageUrl(type, this.imageUrl); + return imageUrl(index, type, this.imageUrl); } return orElse(); } @@ -53873,9 +62007,11 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, + required TResult Function(MessageDeltaContentImageFileObject value) + imageFile, + required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function(MessageDeltaContentRefusalObject value) refusal, + required TResult Function(MessageDeltaContentImageUrlObject value) imageUrl, }) { return imageUrl(this); } @@ -53883,9 +62019,10 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, + TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult? 
Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageDeltaContentRefusalObject value)? refusal, + TResult? Function(MessageDeltaContentImageUrlObject value)? imageUrl, }) { return imageUrl?.call(this); } @@ -53893,9 +62030,10 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageDeltaContentImageFileObject value)? imageFile, + TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageDeltaContentRefusalObject value)? refusal, + TResult Function(MessageDeltaContentImageUrlObject value)? imageUrl, required TResult orElse(), }) { if (imageUrl != null) { @@ -53906,68 +62044,278 @@ class _$MessageContentImageUrlObjectImpl extends MessageContentImageUrlObject { @override Map toJson() { - return _$$MessageContentImageUrlObjectImplToJson( + return _$$MessageDeltaContentImageUrlObjectImplToJson( this, ); } } -abstract class MessageContentImageUrlObject extends MessageContent { - const factory MessageContentImageUrlObject( - {final String type, - @JsonKey(name: 'image_url') - required final MessageContentImageUrl imageUrl}) = - _$MessageContentImageUrlObjectImpl; - const MessageContentImageUrlObject._() : super._(); +abstract class MessageDeltaContentImageUrlObject extends MessageDeltaContent { + const factory MessageDeltaContentImageUrlObject( + {required final int index, + required final String type, + @JsonKey(name: 'image_url', includeIfNull: false) + final MessageContentImageUrl? imageUrl}) = + _$MessageDeltaContentImageUrlObjectImpl; + const MessageDeltaContentImageUrlObject._() : super._(); - factory MessageContentImageUrlObject.fromJson(Map json) = - _$MessageContentImageUrlObjectImpl.fromJson; + factory MessageDeltaContentImageUrlObject.fromJson( + Map json) = + _$MessageDeltaContentImageUrlObjectImpl.fromJson; + /// The index of the content part in the message. @override + int get index; - /// The type of the content part. Always `image_url`. + /// Always `image_url`. + @override String get type; /// The image URL part of a message. - @JsonKey(name: 'image_url') - MessageContentImageUrl get imageUrl; + @JsonKey(name: 'image_url', includeIfNull: false) + MessageContentImageUrl? get imageUrl; + + /// Create a copy of MessageDeltaContent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$MessageContentImageUrlObjectImplCopyWith< - _$MessageContentImageUrlObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentImageUrlObjectImplCopyWith< + _$MessageDeltaContentImageUrlObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +MessageContentTextAnnotations _$MessageContentTextAnnotationsFromJson( + Map json) { + switch (json['type']) { + case 'file_citation': + return MessageContentTextAnnotationsFileCitationObject.fromJson(json); + case 'file_path': + return MessageContentTextAnnotationsFilePathObject.fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'MessageContentTextAnnotations', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$MessageContentTextAnnotations { + /// Always `file_citation`. 
+ String get type => throw _privateConstructorUsedError; + + /// The text in the message content that needs to be replaced. + String get text => throw _privateConstructorUsedError; + + /// The start index of the text in the message content that needs to be replaced. + @JsonKey(name: 'start_index') + int get startIndex => throw _privateConstructorUsedError; + + /// The end index of the text in the message content that needs to be replaced. + @JsonKey(name: 'end_index') + int get endIndex => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) + fileCitation, + required TResult Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) + filePath, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + fileCitation, + TResult? Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + filePath, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + fileCitation, + TResult Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + filePath, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function( + MessageContentTextAnnotationsFileCitationObject value) + fileCitation, + required TResult Function(MessageContentTextAnnotationsFilePathObject value) + filePath, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult? Function(MessageContentTextAnnotationsFilePathObject value)? + filePath, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(MessageContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult Function(MessageContentTextAnnotationsFilePathObject value)? + filePath, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this MessageContentTextAnnotations to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $MessageContentTextAnnotationsCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentTextObjectImplCopyWith<$Res> - implements $MessageContentCopyWith<$Res> { - factory _$$MessageContentTextObjectImplCopyWith( - _$MessageContentTextObjectImpl value, - $Res Function(_$MessageContentTextObjectImpl) then) = - __$$MessageContentTextObjectImplCopyWithImpl<$Res>; +abstract class $MessageContentTextAnnotationsCopyWith<$Res> { + factory $MessageContentTextAnnotationsCopyWith( + MessageContentTextAnnotations value, + $Res Function(MessageContentTextAnnotations) then) = + _$MessageContentTextAnnotationsCopyWithImpl<$Res, + MessageContentTextAnnotations>; + @useResult + $Res call( + {String type, + String text, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex}); +} + +/// @nodoc +class _$MessageContentTextAnnotationsCopyWithImpl<$Res, + $Val extends MessageContentTextAnnotations> + implements $MessageContentTextAnnotationsCopyWith<$Res> { + _$MessageContentTextAnnotationsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? text = null, + Object? startIndex = null, + Object? endIndex = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + startIndex: null == startIndex + ? _value.startIndex + : startIndex // ignore: cast_nullable_to_non_nullable + as int, + endIndex: null == endIndex + ? 
_value.endIndex + : endIndex // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< + $Res> implements $MessageContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith( + _$MessageContentTextAnnotationsFileCitationObjectImpl value, + $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) + then) = + __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, MessageContentText text}); + $Res call( + {String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex}); - $MessageContentTextCopyWith<$Res> get text; + $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation; } /// @nodoc -class __$$MessageContentTextObjectImplCopyWithImpl<$Res> - extends _$MessageContentCopyWithImpl<$Res, _$MessageContentTextObjectImpl> - implements _$$MessageContentTextObjectImplCopyWith<$Res> { - __$$MessageContentTextObjectImplCopyWithImpl( - _$MessageContentTextObjectImpl _value, - $Res Function(_$MessageContentTextObjectImpl) _then) +class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> + extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, + _$MessageContentTextAnnotationsFileCitationObjectImpl> + implements + _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith<$Res> { + __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl( + _$MessageContentTextAnnotationsFileCitationObjectImpl _value, + $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) + _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, Object? text = null, + Object? fileCitation = null, + Object? startIndex = null, + Object? endIndex = null, }) { - return _then(_$MessageContentTextObjectImpl( + return _then(_$MessageContentTextAnnotationsFileCitationObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable @@ -53975,104 +62323,180 @@ class __$$MessageContentTextObjectImplCopyWithImpl<$Res> text: null == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as MessageContentText, + as String, + fileCitation: null == fileCitation + ? _value.fileCitation + : fileCitation // ignore: cast_nullable_to_non_nullable + as MessageContentTextAnnotationsFileCitation, + startIndex: null == startIndex + ? _value.startIndex + : startIndex // ignore: cast_nullable_to_non_nullable + as int, + endIndex: null == endIndex + ? _value.endIndex + : endIndex // ignore: cast_nullable_to_non_nullable + as int, )); } + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $MessageContentTextCopyWith<$Res> get text { - return $MessageContentTextCopyWith<$Res>(_value.text, (value) { - return _then(_value.copyWith(text: value)); + $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation { + return $MessageContentTextAnnotationsFileCitationCopyWith<$Res>( + _value.fileCitation, (value) { + return _then(_value.copyWith(fileCitation: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageContentTextObjectImpl extends MessageContentTextObject { - const _$MessageContentTextObjectImpl({this.type = 'text', required this.text}) +class _$MessageContentTextAnnotationsFileCitationObjectImpl + extends MessageContentTextAnnotationsFileCitationObject { + const _$MessageContentTextAnnotationsFileCitationObjectImpl( + {required this.type, + required this.text, + @JsonKey(name: 'file_citation') required this.fileCitation, + @JsonKey(name: 'start_index') required this.startIndex, + @JsonKey(name: 'end_index') required this.endIndex}) : super._(); - factory _$MessageContentTextObjectImpl.fromJson(Map json) => - _$$MessageContentTextObjectImplFromJson(json); + factory _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson( + Map json) => + _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson(json); - /// Always `text`. + /// Always `file_citation`. @override - @JsonKey() final String type; - /// The text content that is part of a message. + /// The text in the message content that needs to be replaced. @override - final MessageContentText text; + final String text; + + /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. + @override + @JsonKey(name: 'file_citation') + final MessageContentTextAnnotationsFileCitation fileCitation; + + /// The start index of the text in the message content that needs to be replaced. + @override + @JsonKey(name: 'start_index') + final int startIndex; + + /// The end index of the text in the message content that needs to be replaced. + @override + @JsonKey(name: 'end_index') + final int endIndex; @override String toString() { - return 'MessageContent.text(type: $type, text: $text)'; + return 'MessageContentTextAnnotations.fileCitation(type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextObjectImpl && + other is _$MessageContentTextAnnotationsFileCitationObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); + (identical(other.text, text) || other.text == text) && + (identical(other.fileCitation, fileCitation) || + other.fileCitation == fileCitation) && + (identical(other.startIndex, startIndex) || + other.startIndex == startIndex) && + (identical(other.endIndex, endIndex) || + other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, text); + int get hashCode => + Object.hash(runtimeType, type, text, fileCitation, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> - get copyWith => __$$MessageContentTextObjectImplCopyWithImpl< - _$MessageContentTextObjectImpl>(this, _$identity); + _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageContentTextAnnotationsFileCitationObjectImpl> + get copyWith => + __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + _$MessageContentTextAnnotationsFileCitationObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile) - imageFile, - required TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl) - imageUrl, - required TResult Function(String type, MessageContentText text) text, + required TResult Function( + String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) + fileCitation, + required TResult Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) + filePath, }) { - return text(type, this.text); + return fileCitation(type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult? Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult? Function(String type, MessageContentText text)? text, + TResult? Function( + String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + fileCitation, + TResult? Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + filePath, }) { - return text?.call(type, this.text); + return fileCitation?.call( + type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, - @JsonKey(name: 'image_file') MessageContentImageFile imageFile)? - imageFile, - TResult Function(String type, - @JsonKey(name: 'image_url') MessageContentImageUrl imageUrl)? - imageUrl, - TResult Function(String type, MessageContentText text)? text, + TResult Function( + String type, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + fileCitation, + TResult Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? 
+ filePath, required TResult orElse(), }) { - if (text != null) { - return text(type, this.text); + if (fileCitation != null) { + return fileCitation(type, text, this.fileCitation, startIndex, endIndex); } return orElse(); } @@ -54080,358 +62504,320 @@ class _$MessageContentTextObjectImpl extends MessageContentTextObject { @override @optionalTypeArgs TResult map({ - required TResult Function(MessageContentImageFileObject value) imageFile, - required TResult Function(MessageContentImageUrlObject value) imageUrl, - required TResult Function(MessageContentTextObject value) text, + required TResult Function( + MessageContentTextAnnotationsFileCitationObject value) + fileCitation, + required TResult Function(MessageContentTextAnnotationsFilePathObject value) + filePath, }) { - return text(this); + return fileCitation(this); } @override @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageContentImageFileObject value)? imageFile, - TResult? Function(MessageContentImageUrlObject value)? imageUrl, - TResult? Function(MessageContentTextObject value)? text, + TResult? mapOrNull({ + TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult? Function(MessageContentTextAnnotationsFilePathObject value)? + filePath, }) { - return text?.call(this); + return fileCitation?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentImageFileObject value)? imageFile, - TResult Function(MessageContentImageUrlObject value)? imageUrl, - TResult Function(MessageContentTextObject value)? text, + TResult Function(MessageContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult Function(MessageContentTextAnnotationsFilePathObject value)? + filePath, required TResult orElse(), }) { - if (text != null) { - return text(this); + if (fileCitation != null) { + return fileCitation(this); } return orElse(); } @override Map toJson() { - return _$$MessageContentTextObjectImplToJson( + return _$$MessageContentTextAnnotationsFileCitationObjectImplToJson( this, ); } } -abstract class MessageContentTextObject extends MessageContent { - const factory MessageContentTextObject( - {final String type, - required final MessageContentText text}) = _$MessageContentTextObjectImpl; - const MessageContentTextObject._() : super._(); +abstract class MessageContentTextAnnotationsFileCitationObject + extends MessageContentTextAnnotations { + const factory MessageContentTextAnnotationsFileCitationObject( + {required final String type, + required final String text, + @JsonKey(name: 'file_citation') + required final MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') required final int startIndex, + @JsonKey(name: 'end_index') required final int endIndex}) = + _$MessageContentTextAnnotationsFileCitationObjectImpl; + const MessageContentTextAnnotationsFileCitationObject._() : super._(); - factory MessageContentTextObject.fromJson(Map json) = - _$MessageContentTextObjectImpl.fromJson; + factory MessageContentTextAnnotationsFileCitationObject.fromJson( + Map json) = + _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson; + /// Always `file_citation`. @override - - /// Always `text`. String get type; - /// The text content that is part of a message. - MessageContentText get text; + /// The text in the message content that needs to be replaced. 
@override - @JsonKey(ignore: true) - _$$MessageContentTextObjectImplCopyWith<_$MessageContentTextObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -MessageDeltaContent _$MessageDeltaContentFromJson(Map json) { - switch (json['type']) { - case 'image_file': - return MessageDeltaContentImageFileObject.fromJson(json); - case 'text': - return MessageDeltaContentTextObject.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'MessageDeltaContent', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$MessageDeltaContent { - /// The index of the content part in the message. - int get index => throw _privateConstructorUsedError; - - /// Always `image_file`. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) - imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageDeltaContentCopyWith get copyWith => - throw _privateConstructorUsedError; -} + String get text; -/// @nodoc -abstract class $MessageDeltaContentCopyWith<$Res> { - factory $MessageDeltaContentCopyWith( - MessageDeltaContent value, $Res Function(MessageDeltaContent) then) = - _$MessageDeltaContentCopyWithImpl<$Res, MessageDeltaContent>; - @useResult - $Res call({int index, String type}); -} + /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. 
+ @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation get fileCitation; -/// @nodoc -class _$MessageDeltaContentCopyWithImpl<$Res, $Val extends MessageDeltaContent> - implements $MessageDeltaContentCopyWith<$Res> { - _$MessageDeltaContentCopyWithImpl(this._value, this._then); + /// The start index of the text in the message content that needs to be replaced. + @override + @JsonKey(name: 'start_index') + int get startIndex; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// The end index of the text in the message content that needs to be replaced. + @override + @JsonKey(name: 'end_index') + int get endIndex; - @pragma('vm:prefer-inline') + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override - $Res call({ - Object? index = null, - Object? type = null, - }) { - return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageContentTextAnnotationsFileCitationObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> - implements $MessageDeltaContentCopyWith<$Res> { - factory _$$MessageDeltaContentImageFileObjectImplCopyWith( - _$MessageDeltaContentImageFileObjectImpl value, - $Res Function(_$MessageDeltaContentImageFileObjectImpl) then) = - __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> + implements $MessageContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith( + _$MessageContentTextAnnotationsFilePathObjectImpl value, + $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) + then) = + __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile}); + {String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex}); - $MessageContentImageFileCopyWith<$Res>? 
get imageFile; + $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath; } /// @nodoc -class __$$MessageDeltaContentImageFileObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentCopyWithImpl<$Res, - _$MessageDeltaContentImageFileObjectImpl> - implements _$$MessageDeltaContentImageFileObjectImplCopyWith<$Res> { - __$$MessageDeltaContentImageFileObjectImplCopyWithImpl( - _$MessageDeltaContentImageFileObjectImpl _value, - $Res Function(_$MessageDeltaContentImageFileObjectImpl) _then) +class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> + extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, + _$MessageContentTextAnnotationsFilePathObjectImpl> + implements + _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { + __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl( + _$MessageContentTextAnnotationsFilePathObjectImpl _value, + $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? imageFile = freezed, + Object? text = null, + Object? filePath = null, + Object? startIndex = null, + Object? endIndex = null, }) { - return _then(_$MessageDeltaContentImageFileObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$MessageContentTextAnnotationsFilePathObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - imageFile: freezed == imageFile - ? _value.imageFile - : imageFile // ignore: cast_nullable_to_non_nullable - as MessageContentImageFile?, + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + filePath: null == filePath + ? _value.filePath + : filePath // ignore: cast_nullable_to_non_nullable + as MessageContentTextAnnotationsFilePath, + startIndex: null == startIndex + ? _value.startIndex + : startIndex // ignore: cast_nullable_to_non_nullable + as int, + endIndex: null == endIndex + ? _value.endIndex + : endIndex // ignore: cast_nullable_to_non_nullable + as int, )); } + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentImageFileCopyWith<$Res>? 
get imageFile { - if (_value.imageFile == null) { - return null; - } - - return $MessageContentImageFileCopyWith<$Res>(_value.imageFile!, (value) { - return _then(_value.copyWith(imageFile: value)); + $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath { + return $MessageContentTextAnnotationsFilePathCopyWith<$Res>(_value.filePath, + (value) { + return _then(_value.copyWith(filePath: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentImageFileObjectImpl - extends MessageDeltaContentImageFileObject { - const _$MessageDeltaContentImageFileObjectImpl( - {required this.index, - required this.type, - @JsonKey(name: 'image_file', includeIfNull: false) this.imageFile}) +class _$MessageContentTextAnnotationsFilePathObjectImpl + extends MessageContentTextAnnotationsFilePathObject { + const _$MessageContentTextAnnotationsFilePathObjectImpl( + {required this.type, + required this.text, + @JsonKey(name: 'file_path') required this.filePath, + @JsonKey(name: 'start_index') required this.startIndex, + @JsonKey(name: 'end_index') required this.endIndex}) : super._(); - factory _$MessageDeltaContentImageFileObjectImpl.fromJson( + factory _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentImageFileObjectImplFromJson(json); + _$$MessageContentTextAnnotationsFilePathObjectImplFromJson(json); - /// The index of the content part in the message. + /// Always `file_path`. @override - final int index; + final String type; - /// Always `image_file`. + /// The text in the message content that needs to be replaced. @override - final String type; + final String text; - /// The image file that is part of a message. + /// No Description @override - @JsonKey(name: 'image_file', includeIfNull: false) - final MessageContentImageFile? imageFile; + @JsonKey(name: 'file_path') + final MessageContentTextAnnotationsFilePath filePath; + + /// No Description + @override + @JsonKey(name: 'start_index') + final int startIndex; + + /// No Description + @override + @JsonKey(name: 'end_index') + final int endIndex; @override String toString() { - return 'MessageDeltaContent.imageFile(index: $index, type: $type, imageFile: $imageFile)'; + return 'MessageContentTextAnnotations.filePath(type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentImageFileObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$MessageContentTextAnnotationsFilePathObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.imageFile, imageFile) || - other.imageFile == imageFile)); + (identical(other.text, text) || other.text == text) && + (identical(other.filePath, filePath) || + other.filePath == filePath) && + (identical(other.startIndex, startIndex) || + other.startIndex == startIndex) && + (identical(other.endIndex, endIndex) || + other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, index, type, imageFile); + int get hashCode => + Object.hash(runtimeType, type, text, filePath, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentImageFileObjectImplCopyWith< - _$MessageDeltaContentImageFileObjectImpl> - get copyWith => __$$MessageDeltaContentImageFileObjectImplCopyWithImpl< - _$MessageDeltaContentImageFileObjectImpl>(this, _$identity); + _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageContentTextAnnotationsFilePathObjectImpl> + get copyWith => + __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl< + _$MessageContentTextAnnotationsFilePathObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) - imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) + fileCitation, + required TResult Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex) + filePath, }) { - return imageFile(index, type, this.imageFile); + return filePath(type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + fileCitation, + TResult? Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + filePath, }) { - return imageFile?.call(index, type, this.imageFile); + return filePath?.call(type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, + String text, + @JsonKey(name: 'file_citation') + MessageContentTextAnnotationsFileCitation fileCitation, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? + fileCitation, + TResult Function( + String type, + String text, + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') int startIndex, + @JsonKey(name: 'end_index') int endIndex)? 
+ filePath, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(index, type, this.imageFile); + if (filePath != null) { + return filePath(type, text, this.filePath, startIndex, endIndex); } return orElse(); } @@ -54439,403 +62825,371 @@ class _$MessageDeltaContentImageFileObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, + required TResult Function( + MessageContentTextAnnotationsFileCitationObject value) + fileCitation, + required TResult Function(MessageContentTextAnnotationsFilePathObject value) + filePath, }) { - return imageFile(this); + return filePath(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, + TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult? Function(MessageContentTextAnnotationsFilePathObject value)? + filePath, }) { - return imageFile?.call(this); + return filePath?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, + TResult Function(MessageContentTextAnnotationsFileCitationObject value)? + fileCitation, + TResult Function(MessageContentTextAnnotationsFilePathObject value)? + filePath, required TResult orElse(), }) { - if (imageFile != null) { - return imageFile(this); + if (filePath != null) { + return filePath(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentImageFileObjectImplToJson( + return _$$MessageContentTextAnnotationsFilePathObjectImplToJson( this, ); } } -abstract class MessageDeltaContentImageFileObject extends MessageDeltaContent { - const factory MessageDeltaContentImageFileObject( - {required final int index, - required final String type, - @JsonKey(name: 'image_file', includeIfNull: false) - final MessageContentImageFile? imageFile}) = - _$MessageDeltaContentImageFileObjectImpl; - const MessageDeltaContentImageFileObject._() : super._(); +abstract class MessageContentTextAnnotationsFilePathObject + extends MessageContentTextAnnotations { + const factory MessageContentTextAnnotationsFilePathObject( + {required final String type, + required final String text, + @JsonKey(name: 'file_path') + required final MessageContentTextAnnotationsFilePath filePath, + @JsonKey(name: 'start_index') required final int startIndex, + @JsonKey(name: 'end_index') required final int endIndex}) = + _$MessageContentTextAnnotationsFilePathObjectImpl; + const MessageContentTextAnnotationsFilePathObject._() : super._(); - factory MessageDeltaContentImageFileObject.fromJson( + factory MessageContentTextAnnotationsFilePathObject.fromJson( Map json) = - _$MessageDeltaContentImageFileObjectImpl.fromJson; + _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson; + /// Always `file_path`. @override + String get type; - /// The index of the content part in the message. - int get index; + /// The text in the message content that needs to be replaced. 
+ @override + String get text; + + /// No Description + @JsonKey(name: 'file_path') + MessageContentTextAnnotationsFilePath get filePath; + + /// No Description + @override + @JsonKey(name: 'start_index') + int get startIndex; + + /// No Description + @override + @JsonKey(name: 'end_index') + int get endIndex; + + /// Create a copy of MessageContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageContentTextAnnotationsFilePathObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +MessageContentTextAnnotationsFilePath + _$MessageContentTextAnnotationsFilePathFromJson(Map json) { + return _MessageContentTextAnnotationsFilePath.fromJson(json); +} + +/// @nodoc +mixin _$MessageContentTextAnnotationsFilePath { + /// The ID of the file that was generated. + @JsonKey(name: 'file_id') + String get fileId => throw _privateConstructorUsedError; + + /// Serializes this MessageContentTextAnnotationsFilePath to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $MessageContentTextAnnotationsFilePathCopyWith< + MessageContentTextAnnotationsFilePath> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $MessageContentTextAnnotationsFilePathCopyWith<$Res> { + factory $MessageContentTextAnnotationsFilePathCopyWith( + MessageContentTextAnnotationsFilePath value, + $Res Function(MessageContentTextAnnotationsFilePath) then) = + _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, + MessageContentTextAnnotationsFilePath>; + @useResult + $Res call({@JsonKey(name: 'file_id') String fileId}); +} - /// Always `image_file`. - String get type; +/// @nodoc +class _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, + $Val extends MessageContentTextAnnotationsFilePath> + implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { + _$MessageContentTextAnnotationsFilePathCopyWithImpl(this._value, this._then); - /// The image file that is part of a message. - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? get imageFile; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(ignore: true) - _$$MessageDeltaContentImageFileObjectImplCopyWith< - _$MessageDeltaContentImageFileObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? fileId = null, + }) { + return _then(_value.copyWith( + fileId: null == fileId + ? 
_value.fileId + : fileId // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$MessageDeltaContentTextObjectImplCopyWith<$Res> - implements $MessageDeltaContentCopyWith<$Res> { - factory _$$MessageDeltaContentTextObjectImplCopyWith( - _$MessageDeltaContentTextObjectImpl value, - $Res Function(_$MessageDeltaContentTextObjectImpl) then) = - __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res>; +abstract class _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> + implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { + factory _$$MessageContentTextAnnotationsFilePathImplCopyWith( + _$MessageContentTextAnnotationsFilePathImpl value, + $Res Function(_$MessageContentTextAnnotationsFilePathImpl) then) = + __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text}); - - $MessageDeltaContentTextCopyWith<$Res>? get text; + $Res call({@JsonKey(name: 'file_id') String fileId}); } /// @nodoc -class __$$MessageDeltaContentTextObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentCopyWithImpl<$Res, - _$MessageDeltaContentTextObjectImpl> - implements _$$MessageDeltaContentTextObjectImplCopyWith<$Res> { - __$$MessageDeltaContentTextObjectImplCopyWithImpl( - _$MessageDeltaContentTextObjectImpl _value, - $Res Function(_$MessageDeltaContentTextObjectImpl) _then) +class __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res> + extends _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, + _$MessageContentTextAnnotationsFilePathImpl> + implements _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> { + __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl( + _$MessageContentTextAnnotationsFilePathImpl _value, + $Res Function(_$MessageContentTextAnnotationsFilePathImpl) _then) : super(_value, _then); + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, - Object? type = null, - Object? text = freezed, + Object? fileId = null, }) { - return _then(_$MessageDeltaContentTextObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable + return _then(_$MessageContentTextAnnotationsFilePathImpl( + fileId: null == fileId + ? _value.fileId + : fileId // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as MessageDeltaContentText?, )); } - - @override - @pragma('vm:prefer-inline') - $MessageDeltaContentTextCopyWith<$Res>? 
get text { - if (_value.text == null) { - return null; - } - - return $MessageDeltaContentTextCopyWith<$Res>(_value.text!, (value) { - return _then(_value.copyWith(text: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextObjectImpl - extends MessageDeltaContentTextObject { - const _$MessageDeltaContentTextObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.text}) +class _$MessageContentTextAnnotationsFilePathImpl + extends _MessageContentTextAnnotationsFilePath { + const _$MessageContentTextAnnotationsFilePathImpl( + {@JsonKey(name: 'file_id') required this.fileId}) : super._(); - factory _$MessageDeltaContentTextObjectImpl.fromJson( + factory _$MessageContentTextAnnotationsFilePathImpl.fromJson( Map json) => - _$$MessageDeltaContentTextObjectImplFromJson(json); - - /// The index of the content part in the message. - @override - final int index; - - /// Always `text`. - @override - final String type; + _$$MessageContentTextAnnotationsFilePathImplFromJson(json); - /// The text content that is part of a message. + /// The ID of the file that was generated. @override - @JsonKey(includeIfNull: false) - final MessageDeltaContentText? text; + @JsonKey(name: 'file_id') + final String fileId; @override String toString() { - return 'MessageDeltaContent.text(index: $index, type: $type, text: $text)'; + return 'MessageContentTextAnnotationsFilePath(fileId: $fileId)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentTextObjectImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text)); + other is _$MessageContentTextAnnotationsFilePathImpl && + (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, index, type, text); + int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextObjectImplCopyWith< - _$MessageDeltaContentTextObjectImpl> - get copyWith => __$$MessageDeltaContentTextObjectImplCopyWithImpl< - _$MessageDeltaContentTextObjectImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile) - imageFile, - required TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text) - text, - }) { - return text(index, type, this.text); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? imageFile)? - imageFile, - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - }) { - return text?.call(index, type, this.text); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - int index, - String type, - @JsonKey(name: 'image_file', includeIfNull: false) - MessageContentImageFile? 
imageFile)? - imageFile, - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) MessageDeltaContentText? text)? - text, - required TResult orElse(), - }) { - if (text != null) { - return text(index, type, this.text); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function(MessageDeltaContentImageFileObject value) - imageFile, - required TResult Function(MessageDeltaContentTextObject value) text, - }) { - return text(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult? Function(MessageDeltaContentTextObject value)? text, - }) { - return text?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(MessageDeltaContentImageFileObject value)? imageFile, - TResult Function(MessageDeltaContentTextObject value)? text, - required TResult orElse(), - }) { - if (text != null) { - return text(this); - } - return orElse(); - } + _$$MessageContentTextAnnotationsFilePathImplCopyWith< + _$MessageContentTextAnnotationsFilePathImpl> + get copyWith => __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl< + _$MessageContentTextAnnotationsFilePathImpl>(this, _$identity); @override Map toJson() { - return _$$MessageDeltaContentTextObjectImplToJson( + return _$$MessageContentTextAnnotationsFilePathImplToJson( this, ); } } -abstract class MessageDeltaContentTextObject extends MessageDeltaContent { - const factory MessageDeltaContentTextObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final MessageDeltaContentText? text}) = - _$MessageDeltaContentTextObjectImpl; - const MessageDeltaContentTextObject._() : super._(); - - factory MessageDeltaContentTextObject.fromJson(Map json) = - _$MessageDeltaContentTextObjectImpl.fromJson; +abstract class _MessageContentTextAnnotationsFilePath + extends MessageContentTextAnnotationsFilePath { + const factory _MessageContentTextAnnotationsFilePath( + {@JsonKey(name: 'file_id') required final String fileId}) = + _$MessageContentTextAnnotationsFilePathImpl; + const _MessageContentTextAnnotationsFilePath._() : super._(); - @override + factory _MessageContentTextAnnotationsFilePath.fromJson( + Map json) = + _$MessageContentTextAnnotationsFilePathImpl.fromJson; - /// The index of the content part in the message. - int get index; + /// The ID of the file that was generated. @override + @JsonKey(name: 'file_id') + String get fileId; - /// Always `text`. - String get type; - - /// The text content that is part of a message. - @JsonKey(includeIfNull: false) - MessageDeltaContentText? get text; + /// Create a copy of MessageContentTextAnnotationsFilePath + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$MessageDeltaContentTextObjectImplCopyWith< - _$MessageDeltaContentTextObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageContentTextAnnotationsFilePathImplCopyWith< + _$MessageContentTextAnnotationsFilePathImpl> get copyWith => throw _privateConstructorUsedError; } -MessageContentTextAnnotations _$MessageContentTextAnnotationsFromJson( +MessageDeltaContentTextAnnotations _$MessageDeltaContentTextAnnotationsFromJson( Map json) { switch (json['type']) { case 'file_citation': - return MessageContentTextAnnotationsFileCitationObject.fromJson(json); + return MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( + json); case 'file_path': - return MessageContentTextAnnotationsFilePathObject.fromJson(json); + return MessageDeltaContentTextAnnotationsFilePathObject.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'MessageContentTextAnnotations', + 'MessageDeltaContentTextAnnotations', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$MessageContentTextAnnotations { +mixin _$MessageDeltaContentTextAnnotations { + /// The index of the annotation in the text content part. + int get index => throw _privateConstructorUsedError; + /// Always `file_citation`. String get type => throw _privateConstructorUsedError; /// The text in the message content that needs to be replaced. - String get text => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get text => throw _privateConstructorUsedError; /// The start index of the text in the message content that needs to be replaced. - @JsonKey(name: 'start_index') - int get startIndex => throw _privateConstructorUsedError; + @JsonKey(name: 'start_index', includeIfNull: false) + int? get startIndex => throw _privateConstructorUsedError; /// The end index of the text in the message content that needs to be replaced. - @JsonKey(name: 'end_index') - int get endIndex => throw _privateConstructorUsedError; + @JsonKey(name: 'end_index', includeIfNull: false) + int? get endIndex => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) fileCitation, required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? 
Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? fileCitation, TResult? Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? fileCitation, TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? filePath, required TResult orElse(), }) => @@ -54843,161 +63197,198 @@ mixin _$MessageContentTextAnnotations { @optionalTypeArgs TResult map({ required TResult Function( - MessageContentTextAnnotationsFileCitationObject value) + MessageDeltaContentTextAnnotationsFileCitationObject value) fileCitation, - required TResult Function(MessageContentTextAnnotationsFilePathObject value) + required TResult Function( + MessageDeltaContentTextAnnotationsFilePathObject value) filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? + TResult? Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult? Function(MessageContentTextAnnotationsFilePathObject value)? + TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? filePath, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentTextAnnotationsFileCitationObject value)? + TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult Function(MessageContentTextAnnotationsFilePathObject value)? 
+ TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? filePath, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this MessageDeltaContentTextAnnotations to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageContentTextAnnotationsCopyWith + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $MessageDeltaContentTextAnnotationsCopyWith< + MessageDeltaContentTextAnnotations> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $MessageContentTextAnnotationsCopyWith<$Res> { - factory $MessageContentTextAnnotationsCopyWith( - MessageContentTextAnnotations value, - $Res Function(MessageContentTextAnnotations) then) = - _$MessageContentTextAnnotationsCopyWithImpl<$Res, - MessageContentTextAnnotations>; +abstract class $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + factory $MessageDeltaContentTextAnnotationsCopyWith( + MessageDeltaContentTextAnnotations value, + $Res Function(MessageDeltaContentTextAnnotations) then) = + _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + MessageDeltaContentTextAnnotations>; @useResult $Res call( - {String type, - String text, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex}); + {int index, + String type, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); } /// @nodoc -class _$MessageContentTextAnnotationsCopyWithImpl<$Res, - $Val extends MessageContentTextAnnotations> - implements $MessageContentTextAnnotationsCopyWith<$Res> { - _$MessageContentTextAnnotationsCopyWithImpl(this._value, this._then); +class _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + $Val extends MessageDeltaContentTextAnnotations> + implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + _$MessageDeltaContentTextAnnotationsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? text = null, - Object? startIndex = null, - Object? endIndex = null, + Object? text = freezed, + Object? startIndex = freezed, + Object? endIndex = freezed, }) { return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: null == text + text: freezed == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as String, - startIndex: null == startIndex + as String?, + startIndex: freezed == startIndex ? _value.startIndex : startIndex // ignore: cast_nullable_to_non_nullable - as int, - endIndex: null == endIndex + as int?, + endIndex: freezed == endIndex ? 
_value.endIndex : endIndex // ignore: cast_nullable_to_non_nullable - as int, + as int?, ) as $Val); } } /// @nodoc -abstract class _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< - $Res> implements $MessageContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith( - _$MessageContentTextAnnotationsFileCitationObjectImpl value, - $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) +abstract class _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith( + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl value, + $Res Function( + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) then) = - __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res>; + __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + $Res>; @override @useResult $Res call( - {String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex}); + {int index, + String type, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); - $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation; + $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? + get fileCitation; } /// @nodoc -class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> - extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, - _$MessageContentTextAnnotationsFileCitationObjectImpl> +class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + $Res> + extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> implements - _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith<$Res> { - __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl( - _$MessageContentTextAnnotationsFileCitationObjectImpl _value, - $Res Function(_$MessageContentTextAnnotationsFileCitationObjectImpl) + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + $Res> { + __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl( + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _value, + $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? text = null, - Object? fileCitation = null, - Object? startIndex = null, - Object? endIndex = null, + Object? text = freezed, + Object? fileCitation = freezed, + Object? startIndex = freezed, + Object? endIndex = freezed, }) { - return _then(_$MessageContentTextAnnotationsFileCitationObjectImpl( + return _then(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: null == text + text: freezed == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as String, - fileCitation: null == fileCitation + as String?, + fileCitation: freezed == fileCitation ? _value.fileCitation : fileCitation // ignore: cast_nullable_to_non_nullable - as MessageContentTextAnnotationsFileCitation, - startIndex: null == startIndex + as MessageDeltaContentTextAnnotationsFileCitation?, + startIndex: freezed == startIndex ? _value.startIndex : startIndex // ignore: cast_nullable_to_non_nullable - as int, - endIndex: null == endIndex + as int?, + endIndex: freezed == endIndex ? _value.endIndex : endIndex // ignore: cast_nullable_to_non_nullable - as int, + as int?, )); } + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentTextAnnotationsFileCitationCopyWith<$Res> get fileCitation { - return $MessageContentTextAnnotationsFileCitationCopyWith<$Res>( - _value.fileCitation, (value) { + $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? + get fileCitation { + if (_value.fileCitation == null) { + return null; + } + + return $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>( + _value.fileCitation!, (value) { return _then(_value.copyWith(fileCitation: value)); }); } @@ -55005,19 +63396,24 @@ class __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentTextAnnotationsFileCitationObjectImpl - extends MessageContentTextAnnotationsFileCitationObject { - const _$MessageContentTextAnnotationsFileCitationObjectImpl( - {required this.type, - required this.text, - @JsonKey(name: 'file_citation') required this.fileCitation, - @JsonKey(name: 'start_index') required this.startIndex, - @JsonKey(name: 'end_index') required this.endIndex}) +class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl + extends MessageDeltaContentTextAnnotationsFileCitationObject { + const _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.text, + @JsonKey(name: 'file_citation', includeIfNull: false) this.fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) : super._(); - factory _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson( + factory _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson( Map json) => - _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson(json); + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson(json); + + /// The index of the annotation in the text content part. + @override + final int index; /// Always `file_citation`. @override @@ -55025,33 +63421,36 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl /// The text in the message content that needs to be replaced. @override - final String text; + @JsonKey(includeIfNull: false) + final String? text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. @override - @JsonKey(name: 'file_citation') - final MessageContentTextAnnotationsFileCitation fileCitation; + @JsonKey(name: 'file_citation', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFileCitation? 
fileCitation; /// The start index of the text in the message content that needs to be replaced. @override - @JsonKey(name: 'start_index') - final int startIndex; + @JsonKey(name: 'start_index', includeIfNull: false) + final int? startIndex; /// The end index of the text in the message content that needs to be replaced. @override - @JsonKey(name: 'end_index') - final int endIndex; + @JsonKey(name: 'end_index', includeIfNull: false) + final int? endIndex; @override String toString() { - return 'MessageContentTextAnnotations.fileCitation(type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; + return 'MessageDeltaContentTextAnnotations.fileCitation(index: $index, type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextAnnotationsFileCitationObjectImpl && + other + is _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && (identical(other.text, text) || other.text == text) && (identical(other.fileCitation, fileCitation) || @@ -55062,91 +63461,101 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, type, text, fileCitation, startIndex, endIndex); + int get hashCode => Object.hash( + runtimeType, index, type, text, fileCitation, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageContentTextAnnotationsFileCitationObjectImpl> + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> get copyWith => - __$$MessageContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - _$MessageContentTextAnnotationsFileCitationObjectImpl>( + __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) fileCitation, required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? 
filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) filePath, }) { - return fileCitation(type, text, this.fileCitation, startIndex, endIndex); + return fileCitation( + index, type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? fileCitation, TResult? Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? filePath, }) { return fileCitation?.call( - type, text, this.fileCitation, startIndex, endIndex); + index, type, text, this.fileCitation, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? fileCitation, TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? 
filePath, required TResult orElse(), }) { if (fileCitation != null) { - return fileCitation(type, text, this.fileCitation, startIndex, endIndex); + return fileCitation( + index, type, text, this.fileCitation, startIndex, endIndex); } return orElse(); } @@ -55155,9 +63564,10 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl @optionalTypeArgs TResult map({ required TResult Function( - MessageContentTextAnnotationsFileCitationObject value) + MessageDeltaContentTextAnnotationsFileCitationObject value) fileCitation, - required TResult Function(MessageContentTextAnnotationsFilePathObject value) + required TResult Function( + MessageDeltaContentTextAnnotationsFilePathObject value) filePath, }) { return fileCitation(this); @@ -55166,9 +63576,10 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? + TResult? Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult? Function(MessageContentTextAnnotationsFilePathObject value)? + TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? filePath, }) { return fileCitation?.call(this); @@ -55177,9 +63588,10 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentTextAnnotationsFileCitationObject value)? + TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult Function(MessageContentTextAnnotationsFilePathObject value)? + TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? filePath, required TResult orElse(), }) { @@ -55191,127 +63603,154 @@ class _$MessageContentTextAnnotationsFileCitationObjectImpl @override Map toJson() { - return _$$MessageContentTextAnnotationsFileCitationObjectImplToJson( + return _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplToJson( this, ); } } -abstract class MessageContentTextAnnotationsFileCitationObject - extends MessageContentTextAnnotations { - const factory MessageContentTextAnnotationsFileCitationObject( - {required final String type, - required final String text, - @JsonKey(name: 'file_citation') - required final MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') required final int startIndex, - @JsonKey(name: 'end_index') required final int endIndex}) = - _$MessageContentTextAnnotationsFileCitationObjectImpl; - const MessageContentTextAnnotationsFileCitationObject._() : super._(); +abstract class MessageDeltaContentTextAnnotationsFileCitationObject + extends MessageDeltaContentTextAnnotations { + const factory MessageDeltaContentTextAnnotationsFileCitationObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) + final int? 
+ endIndex}) = _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl; + const MessageDeltaContentTextAnnotationsFileCitationObject._() : super._(); - factory MessageContentTextAnnotationsFileCitationObject.fromJson( + factory MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( Map json) = - _$MessageContentTextAnnotationsFileCitationObjectImpl.fromJson; + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson; + /// The index of the annotation in the text content part. @override + int get index; /// Always `file_citation`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. - String get text; + @override + @JsonKey(includeIfNull: false) + String? get text; /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation get fileCitation; - @override + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? get fileCitation; /// The start index of the text in the message content that needs to be replaced. - @JsonKey(name: 'start_index') - int get startIndex; @override + @JsonKey(name: 'start_index', includeIfNull: false) + int? get startIndex; /// The end index of the text in the message content that needs to be replaced. - @JsonKey(name: 'end_index') - int get endIndex; @override - @JsonKey(ignore: true) - _$$MessageContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageContentTextAnnotationsFileCitationObjectImpl> + @JsonKey(name: 'end_index', includeIfNull: false) + int? get endIndex; + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> - implements $MessageContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith( - _$MessageContentTextAnnotationsFilePathObjectImpl value, - $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) +abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< + $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { + factory _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith( + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl value, + $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) then) = - __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res>; + __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< + $Res>; @override @useResult $Res call( - {String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex}); + {int index, + String type, + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? 
endIndex}); - $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath; + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? + get filePath; } /// @nodoc -class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> - extends _$MessageContentTextAnnotationsCopyWithImpl<$Res, - _$MessageContentTextAnnotationsFilePathObjectImpl> +class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> + extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> implements - _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { - __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl( - _$MessageContentTextAnnotationsFilePathObjectImpl _value, - $Res Function(_$MessageContentTextAnnotationsFilePathObjectImpl) _then) + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { + __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl( + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _value, + $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) + _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? index = null, Object? type = null, - Object? text = null, - Object? filePath = null, - Object? startIndex = null, - Object? endIndex = null, + Object? text = freezed, + Object? filePath = freezed, + Object? startIndex = freezed, + Object? endIndex = freezed, }) { - return _then(_$MessageContentTextAnnotationsFilePathObjectImpl( + return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: null == text + text: freezed == text ? _value.text : text // ignore: cast_nullable_to_non_nullable - as String, - filePath: null == filePath + as String?, + filePath: freezed == filePath ? _value.filePath : filePath // ignore: cast_nullable_to_non_nullable - as MessageContentTextAnnotationsFilePath, - startIndex: null == startIndex + as MessageDeltaContentTextAnnotationsFilePathObjectFilePath?, + startIndex: freezed == startIndex ? _value.startIndex : startIndex // ignore: cast_nullable_to_non_nullable - as int, - endIndex: null == endIndex + as int?, + endIndex: freezed == endIndex ? _value.endIndex : endIndex // ignore: cast_nullable_to_non_nullable - as int, + as int?, )); } + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageContentTextAnnotationsFilePathCopyWith<$Res> get filePath { - return $MessageContentTextAnnotationsFilePathCopyWith<$Res>(_value.filePath, - (value) { + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? 
+ get filePath { + if (_value.filePath == null) { + return null; + } + + return $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res>(_value.filePath!, (value) { return _then(_value.copyWith(filePath: value)); }); } @@ -55319,19 +63758,24 @@ class __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$MessageContentTextAnnotationsFilePathObjectImpl - extends MessageContentTextAnnotationsFilePathObject { - const _$MessageContentTextAnnotationsFilePathObjectImpl( - {required this.type, - required this.text, - @JsonKey(name: 'file_path') required this.filePath, - @JsonKey(name: 'start_index') required this.startIndex, - @JsonKey(name: 'end_index') required this.endIndex}) +class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl + extends MessageDeltaContentTextAnnotationsFilePathObject { + const _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( + {required this.index, + required this.type, + @JsonKey(includeIfNull: false) this.text, + @JsonKey(name: 'file_path', includeIfNull: false) this.filePath, + @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) : super._(); - factory _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson( + factory _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson( Map json) => - _$$MessageContentTextAnnotationsFilePathObjectImplFromJson(json); + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson(json); + + /// The index of the annotation in the text content part. + @override + final int index; /// Always `file_path`. @override @@ -55339,33 +63783,35 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl /// The text in the message content that needs to be replaced. @override - final String text; + @JsonKey(includeIfNull: false) + final String? text; /// No Description @override - @JsonKey(name: 'file_path') - final MessageContentTextAnnotationsFilePath filePath; + @JsonKey(name: 'file_path', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath; /// No Description @override - @JsonKey(name: 'start_index') - final int startIndex; + @JsonKey(name: 'start_index', includeIfNull: false) + final int? startIndex; /// No Description @override - @JsonKey(name: 'end_index') - final int endIndex; + @JsonKey(name: 'end_index', includeIfNull: false) + final int? 
endIndex; @override String toString() { - return 'MessageContentTextAnnotations.filePath(type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; + return 'MessageDeltaContentTextAnnotations.filePath(index: $index, type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextAnnotationsFilePathObjectImpl && + other is _$MessageDeltaContentTextAnnotationsFilePathObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.type, type) || other.type == type) && (identical(other.text, text) || other.text == text) && (identical(other.filePath, filePath) || @@ -55376,90 +63822,99 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl other.endIndex == endIndex)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, type, text, filePath, startIndex, endIndex); + int get hashCode => Object.hash( + runtimeType, index, type, text, filePath, startIndex, endIndex); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageContentTextAnnotationsFilePathObjectImpl> + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> get copyWith => - __$$MessageContentTextAnnotationsFilePathObjectImplCopyWithImpl< - _$MessageContentTextAnnotationsFilePathObjectImpl>( + __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) fileCitation, required TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex) + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) filePath, }) { - return filePath(type, text, this.filePath, startIndex, endIndex); + return filePath(index, type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? 
Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? fileCitation, TResult? Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? filePath, }) { - return filePath?.call(type, text, this.filePath, startIndex, endIndex); + return filePath?.call( + index, type, text, this.filePath, startIndex, endIndex); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_citation') - MessageContentTextAnnotationsFileCitation fileCitation, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_citation', includeIfNull: false) + MessageDeltaContentTextAnnotationsFileCitation? fileCitation, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? fileCitation, TResult Function( + int index, String type, - String text, - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') int startIndex, - @JsonKey(name: 'end_index') int endIndex)? + @JsonKey(includeIfNull: false) String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? filePath, required TResult orElse(), }) { if (filePath != null) { - return filePath(type, text, this.filePath, startIndex, endIndex); + return filePath(index, type, text, this.filePath, startIndex, endIndex); } return orElse(); } @@ -55468,9 +63923,10 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl @optionalTypeArgs TResult map({ required TResult Function( - MessageContentTextAnnotationsFileCitationObject value) + MessageDeltaContentTextAnnotationsFileCitationObject value) fileCitation, - required TResult Function(MessageContentTextAnnotationsFilePathObject value) + required TResult Function( + MessageDeltaContentTextAnnotationsFilePathObject value) filePath, }) { return filePath(this); @@ -55479,9 +63935,10 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(MessageContentTextAnnotationsFileCitationObject value)? + TResult? Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult? Function(MessageContentTextAnnotationsFilePathObject value)? + TResult? 
Function(MessageDeltaContentTextAnnotationsFilePathObject value)? filePath, }) { return filePath?.call(this); @@ -55490,9 +63947,10 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(MessageContentTextAnnotationsFileCitationObject value)? + TResult Function( + MessageDeltaContentTextAnnotationsFileCitationObject value)? fileCitation, - TResult Function(MessageContentTextAnnotationsFilePathObject value)? + TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? filePath, required TResult orElse(), }) { @@ -55504,673 +63962,788 @@ class _$MessageContentTextAnnotationsFilePathObjectImpl @override Map toJson() { - return _$$MessageContentTextAnnotationsFilePathObjectImplToJson( + return _$$MessageDeltaContentTextAnnotationsFilePathObjectImplToJson( this, ); } } -abstract class MessageContentTextAnnotationsFilePathObject - extends MessageContentTextAnnotations { - const factory MessageContentTextAnnotationsFilePathObject( - {required final String type, - required final String text, - @JsonKey(name: 'file_path') - required final MessageContentTextAnnotationsFilePath filePath, - @JsonKey(name: 'start_index') required final int startIndex, - @JsonKey(name: 'end_index') required final int endIndex}) = - _$MessageContentTextAnnotationsFilePathObjectImpl; - const MessageContentTextAnnotationsFilePathObject._() : super._(); +abstract class MessageDeltaContentTextAnnotationsFilePathObject + extends MessageDeltaContentTextAnnotations { + const factory MessageDeltaContentTextAnnotationsFilePathObject( + {required final int index, + required final String type, + @JsonKey(includeIfNull: false) final String? text, + @JsonKey(name: 'file_path', includeIfNull: false) + final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, + @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, + @JsonKey(name: 'end_index', includeIfNull: false) + final int? + endIndex}) = _$MessageDeltaContentTextAnnotationsFilePathObjectImpl; + const MessageDeltaContentTextAnnotationsFilePathObject._() : super._(); - factory MessageContentTextAnnotationsFilePathObject.fromJson( + factory MessageDeltaContentTextAnnotationsFilePathObject.fromJson( Map json) = - _$MessageContentTextAnnotationsFilePathObjectImpl.fromJson; + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson; + /// The index of the annotation in the text content part. @override + int get index; /// Always `file_path`. - String get type; @override + String get type; /// The text in the message content that needs to be replaced. - String get text; + @override + @JsonKey(includeIfNull: false) + String? get text; /// No Description - @JsonKey(name: 'file_path') - MessageContentTextAnnotationsFilePath get filePath; - @override + @JsonKey(name: 'file_path', includeIfNull: false) + MessageDeltaContentTextAnnotationsFilePathObjectFilePath? get filePath; /// No Description - @JsonKey(name: 'start_index') - int get startIndex; @override + @JsonKey(name: 'start_index', includeIfNull: false) + int? get startIndex; /// No Description - @JsonKey(name: 'end_index') - int get endIndex; @override - @JsonKey(ignore: true) - _$$MessageContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageContentTextAnnotationsFilePathObjectImpl> + @JsonKey(name: 'end_index', includeIfNull: false) + int? get endIndex; + + /// Create a copy of MessageDeltaContentTextAnnotations + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> get copyWith => throw _privateConstructorUsedError; } -MessageContentTextAnnotationsFilePath - _$MessageContentTextAnnotationsFilePathFromJson(Map json) { - return _MessageContentTextAnnotationsFilePath.fromJson(json); +MessageDeltaContentTextAnnotationsFilePathObjectFilePath + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathFromJson( + Map json) { + return _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( + json); } /// @nodoc -mixin _$MessageContentTextAnnotationsFilePath { +mixin _$MessageDeltaContentTextAnnotationsFilePathObjectFilePath { /// The ID of the file that was generated. - @JsonKey(name: 'file_id') - String get fileId => throw _privateConstructorUsedError; + @JsonKey(name: 'file_id', includeIfNull: false) + String? get fileId => throw _privateConstructorUsedError; + /// Serializes this MessageDeltaContentTextAnnotationsFilePathObjectFilePath to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageContentTextAnnotationsFilePathCopyWith< - MessageContentTextAnnotationsFilePath> + + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + MessageDeltaContentTextAnnotationsFilePathObjectFilePath> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $MessageContentTextAnnotationsFilePathCopyWith<$Res> { - factory $MessageContentTextAnnotationsFilePathCopyWith( - MessageContentTextAnnotationsFilePath value, - $Res Function(MessageContentTextAnnotationsFilePath) then) = - _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, - MessageContentTextAnnotationsFilePath>; +abstract class $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res> { + factory $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith( + MessageDeltaContentTextAnnotationsFilePathObjectFilePath value, + $Res Function( + MessageDeltaContentTextAnnotationsFilePathObjectFilePath) + then) = + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< + $Res, MessageDeltaContentTextAnnotationsFilePathObjectFilePath>; @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); } /// @nodoc -class _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, - $Val extends MessageContentTextAnnotationsFilePath> - implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { - _$MessageContentTextAnnotationsFilePathCopyWithImpl(this._value, this._then); +class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< + $Res, + $Val extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath> + implements + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res> { + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl( + this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, + Object? fileId = freezed, }) { return _then(_value.copyWith( - fileId: null == fileId + fileId: freezed == fileId ? _value.fileId : fileId // ignore: cast_nullable_to_non_nullable - as String, + as String?, ) as $Val); } } /// @nodoc -abstract class _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> - implements $MessageContentTextAnnotationsFilePathCopyWith<$Res> { - factory _$$MessageContentTextAnnotationsFilePathImplCopyWith( - _$MessageContentTextAnnotationsFilePathImpl value, - $Res Function(_$MessageContentTextAnnotationsFilePathImpl) then) = - __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res>; +abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + $Res> + implements + $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< + $Res> { + factory _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl value, + $Res Function( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) + then) = + __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< + $Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id') String fileId}); + $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); } /// @nodoc -class __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl<$Res> - extends _$MessageContentTextAnnotationsFilePathCopyWithImpl<$Res, - _$MessageContentTextAnnotationsFilePathImpl> - implements _$$MessageContentTextAnnotationsFilePathImplCopyWith<$Res> { - __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl( - _$MessageContentTextAnnotationsFilePathImpl _value, - $Res Function(_$MessageContentTextAnnotationsFilePathImpl) _then) +class __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< + $Res> + extends _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< + $Res, _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> + implements + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + $Res> { + __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl _value, + $Res Function( + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) + _then) : super(_value, _then); + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = null, + Object? fileId = freezed, }) { - return _then(_$MessageContentTextAnnotationsFilePathImpl( - fileId: null == fileId + return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( + fileId: freezed == fileId ? 
_value.fileId : fileId // ignore: cast_nullable_to_non_nullable - as String, + as String?, )); } } /// @nodoc @JsonSerializable() -class _$MessageContentTextAnnotationsFilePathImpl - extends _MessageContentTextAnnotationsFilePath { - const _$MessageContentTextAnnotationsFilePathImpl( - {@JsonKey(name: 'file_id') required this.fileId}) +class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl + extends _MessageDeltaContentTextAnnotationsFilePathObjectFilePath { + const _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( + {@JsonKey(name: 'file_id', includeIfNull: false) this.fileId}) : super._(); - factory _$MessageContentTextAnnotationsFilePathImpl.fromJson( + factory _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson( Map json) => - _$$MessageContentTextAnnotationsFilePathImplFromJson(json); + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplFromJson( + json); /// The ID of the file that was generated. @override - @JsonKey(name: 'file_id') - final String fileId; + @JsonKey(name: 'file_id', includeIfNull: false) + final String? fileId; @override String toString() { - return 'MessageContentTextAnnotationsFilePath(fileId: $fileId)'; + return 'MessageDeltaContentTextAnnotationsFilePathObjectFilePath(fileId: $fileId)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageContentTextAnnotationsFilePathImpl && + other + is _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl && (identical(other.fileId, fileId) || other.fileId == fileId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fileId); - @JsonKey(ignore: true) + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageContentTextAnnotationsFilePathImplCopyWith< - _$MessageContentTextAnnotationsFilePathImpl> - get copyWith => __$$MessageContentTextAnnotationsFilePathImplCopyWithImpl< - _$MessageContentTextAnnotationsFilePathImpl>(this, _$identity); + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> + get copyWith => + __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl>( + this, _$identity); @override Map toJson() { - return _$$MessageContentTextAnnotationsFilePathImplToJson( + return _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplToJson( this, ); } } -abstract class _MessageContentTextAnnotationsFilePath - extends MessageContentTextAnnotationsFilePath { - const factory _MessageContentTextAnnotationsFilePath( - {@JsonKey(name: 'file_id') required final String fileId}) = - _$MessageContentTextAnnotationsFilePathImpl; - const _MessageContentTextAnnotationsFilePath._() : super._(); +abstract class _MessageDeltaContentTextAnnotationsFilePathObjectFilePath + extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath { + const factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath( + {@JsonKey(name: 'file_id', includeIfNull: false) + final String? 
fileId}) = + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl; + const _MessageDeltaContentTextAnnotationsFilePathObjectFilePath._() + : super._(); - factory _MessageContentTextAnnotationsFilePath.fromJson( + factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( Map json) = - _$MessageContentTextAnnotationsFilePathImpl.fromJson; + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson; + /// The ID of the file that was generated. @override + @JsonKey(name: 'file_id', includeIfNull: false) + String? get fileId; - /// The ID of the file that was generated. - @JsonKey(name: 'file_id') - String get fileId; + /// Create a copy of MessageDeltaContentTextAnnotationsFilePathObjectFilePath + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$MessageContentTextAnnotationsFilePathImplCopyWith< - _$MessageContentTextAnnotationsFilePathImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< + _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> get copyWith => throw _privateConstructorUsedError; } -MessageDeltaContentTextAnnotations _$MessageDeltaContentTextAnnotationsFromJson( - Map json) { +RunStepDetails _$RunStepDetailsFromJson(Map json) { switch (json['type']) { - case 'file_citation': - return MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( - json); - case 'file_path': - return MessageDeltaContentTextAnnotationsFilePathObject.fromJson(json); + case 'message_creation': + return RunStepDetailsMessageCreationObject.fromJson(json); + case 'tool_calls': + return RunStepDetailsToolCallsObject.fromJson(json); default: - throw CheckedFromJsonException( - json, - 'type', - 'MessageDeltaContentTextAnnotations', + throw CheckedFromJsonException(json, 'type', 'RunStepDetails', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$MessageDeltaContentTextAnnotations { - /// The index of the annotation in the text content part. - int get index => throw _privateConstructorUsedError; - - /// Always `file_citation`. +mixin _$RunStepDetails { + /// Always `message_creation`. String get type => throw _privateConstructorUsedError; - - /// The text in the message content that needs to be replaced. - @JsonKey(includeIfNull: false) - String? get text => throw _privateConstructorUsedError; - - /// The start index of the text in the message content that needs to be replaced. - @JsonKey(name: 'start_index', includeIfNull: false) - int? get startIndex => throw _privateConstructorUsedError; - - /// The end index of the text in the message content that needs to be replaced. - @JsonKey(name: 'end_index', includeIfNull: false) - int? get endIndex => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) - fileCitation, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation) + messageCreation, required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? 
filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) - filePath, + @JsonKey(name: 'tool_calls') + List toolCalls) + toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - fileCitation, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - filePath, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - fileCitation, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - filePath, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value) - fileCitation, - required TResult Function( - MessageDeltaContentTextAnnotationsFilePathObject value) - filePath, + required TResult Function(RunStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? - filePath, + TResult? Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? - filePath, + TResult Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDetailsToolCallsObject value)? 
toolCalls, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetails to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageDeltaContentTextAnnotationsCopyWith< - MessageDeltaContentTextAnnotations> - get copyWith => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; } /// @nodoc -abstract class $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - factory $MessageDeltaContentTextAnnotationsCopyWith( - MessageDeltaContentTextAnnotations value, - $Res Function(MessageDeltaContentTextAnnotations) then) = - _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - MessageDeltaContentTextAnnotations>; +abstract class $RunStepDetailsCopyWith<$Res> { + factory $RunStepDetailsCopyWith( + RunStepDetails value, $Res Function(RunStepDetails) then) = + _$RunStepDetailsCopyWithImpl<$Res, RunStepDetails>; @useResult - $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); + $Res call({String type}); } /// @nodoc -class _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - $Val extends MessageDeltaContentTextAnnotations> - implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - _$MessageDeltaContentTextAnnotationsCopyWithImpl(this._value, this._then); +class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> + implements $RunStepDetailsCopyWith<$Res> { + _$RunStepDetailsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, - Object? startIndex = freezed, - Object? endIndex = freezed, }) { return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String?, - startIndex: freezed == startIndex - ? _value.startIndex - : startIndex // ignore: cast_nullable_to_non_nullable - as int?, - endIndex: freezed == endIndex - ? 
_value.endIndex - : endIndex // ignore: cast_nullable_to_non_nullable - as int?, ) as $Val); } } /// @nodoc -abstract class _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith( - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl value, - $Res Function( - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) - then) = - __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - $Res>; +abstract class _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> + implements $RunStepDetailsCopyWith<$Res> { + factory _$$RunStepDetailsMessageCreationObjectImplCopyWith( + _$RunStepDetailsMessageCreationObjectImpl value, + $Res Function(_$RunStepDetailsMessageCreationObjectImpl) then) = + __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String type, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation}); + + $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation; +} + +/// @nodoc +class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsCopyWithImpl<$Res, + _$RunStepDetailsMessageCreationObjectImpl> + implements _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> { + __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl( + _$RunStepDetailsMessageCreationObjectImpl _value, + $Res Function(_$RunStepDetailsMessageCreationObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? messageCreation = null, + }) { + return _then(_$RunStepDetailsMessageCreationObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + messageCreation: null == messageCreation + ? _value.messageCreation + : messageCreation // ignore: cast_nullable_to_non_nullable + as RunStepDetailsMessageCreation, + )); + } + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation { + return $RunStepDetailsMessageCreationCopyWith<$Res>(_value.messageCreation, + (value) { + return _then(_value.copyWith(messageCreation: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsMessageCreationObjectImpl + extends RunStepDetailsMessageCreationObject { + const _$RunStepDetailsMessageCreationObjectImpl( + {required this.type, + @JsonKey(name: 'message_creation') required this.messageCreation}) + : super._(); + + factory _$RunStepDetailsMessageCreationObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsMessageCreationObjectImplFromJson(json); + + /// Always `message_creation`. + @override + final String type; + + /// Details of the message creation by the run step. 
+ @override + @JsonKey(name: 'message_creation') + final RunStepDetailsMessageCreation messageCreation; + + @override + String toString() { + return 'RunStepDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsMessageCreationObjectImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.messageCreation, messageCreation) || + other.messageCreation == messageCreation)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, messageCreation); + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDetailsMessageCreationObjectImpl> + get copyWith => __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl< + _$RunStepDetailsMessageCreationObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + String type, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation) + messageCreation, + required TResult Function( + String type, + @JsonKey(name: 'tool_calls') + List toolCalls) + toolCalls, + }) { + return messageCreation(type, this.messageCreation); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + String type, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, + TResult? Function( + String type, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, + }) { + return messageCreation?.call(type, this.messageCreation); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + String type, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, + TResult Function( + String type, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, + required TResult orElse(), + }) { + if (messageCreation != null) { + return messageCreation(type, this.messageCreation); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(RunStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, + }) { + return messageCreation(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, + }) { + return messageCreation?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDetailsToolCallsObject value)? 
toolCalls, + required TResult orElse(), + }) { + if (messageCreation != null) { + return messageCreation(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$RunStepDetailsMessageCreationObjectImplToJson( + this, + ); + } +} + +abstract class RunStepDetailsMessageCreationObject extends RunStepDetails { + const factory RunStepDetailsMessageCreationObject( + {required final String type, + @JsonKey(name: 'message_creation') + required final RunStepDetailsMessageCreation messageCreation}) = + _$RunStepDetailsMessageCreationObjectImpl; + const RunStepDetailsMessageCreationObject._() : super._(); + + factory RunStepDetailsMessageCreationObject.fromJson( + Map json) = + _$RunStepDetailsMessageCreationObjectImpl.fromJson; + + /// Always `message_creation`. + @override + String get type; + + /// Details of the message creation by the run step. + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation get messageCreation; + + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDetailsMessageCreationObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> + implements $RunStepDetailsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsObjectImplCopyWith( + _$RunStepDetailsToolCallsObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsObjectImpl) then) = + __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); - - $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? - get fileCitation; + {String type, + @JsonKey(name: 'tool_calls') List toolCalls}); } /// @nodoc -class __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - $Res> - extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> - implements - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - $Res> { - __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl( - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _value, - $Res Function(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl) - _then) +class __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsObjectImpl> + implements _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, - Object? fileCitation = freezed, - Object? startIndex = freezed, - Object? endIndex = freezed, + Object? 
toolCalls = null, }) { - return _then(_$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$RunStepDetailsToolCallsObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String?, - fileCitation: freezed == fileCitation - ? _value.fileCitation - : fileCitation // ignore: cast_nullable_to_non_nullable - as MessageDeltaContentTextAnnotationsFileCitation?, - startIndex: freezed == startIndex - ? _value.startIndex - : startIndex // ignore: cast_nullable_to_non_nullable - as int?, - endIndex: freezed == endIndex - ? _value.endIndex - : endIndex // ignore: cast_nullable_to_non_nullable - as int?, + toolCalls: null == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List, )); } - - @override - @pragma('vm:prefer-inline') - $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>? - get fileCitation { - if (_value.fileCitation == null) { - return null; - } - - return $MessageDeltaContentTextAnnotationsFileCitationCopyWith<$Res>( - _value.fileCitation!, (value) { - return _then(_value.copyWith(fileCitation: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl - extends MessageDeltaContentTextAnnotationsFileCitationObject { - const _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.text, - @JsonKey(name: 'file_citation', includeIfNull: false) this.fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) - : super._(); +class _$RunStepDetailsToolCallsObjectImpl + extends RunStepDetailsToolCallsObject { + const _$RunStepDetailsToolCallsObjectImpl( + {required this.type, + @JsonKey(name: 'tool_calls') + required final List toolCalls}) + : _toolCalls = toolCalls, + super._(); - factory _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson(json); - - /// The index of the annotation in the text content part. - @override - final int index; + _$$RunStepDetailsToolCallsObjectImplFromJson(json); - /// Always `file_citation`. + /// Always `tool_calls`. @override final String type; - /// The text in the message content that needs to be replaced. - @override - @JsonKey(includeIfNull: false) - final String? text; - - /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. - @override - @JsonKey(name: 'file_citation', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFileCitation? fileCitation; - - /// The start index of the text in the message content that needs to be replaced. - @override - @JsonKey(name: 'start_index', includeIfNull: false) - final int? startIndex; + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + final List _toolCalls; - /// The end index of the text in the message content that needs to be replaced. + /// An array of tool calls the run step was involved in. 
These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. @override - @JsonKey(name: 'end_index', includeIfNull: false) - final int? endIndex; + @JsonKey(name: 'tool_calls') + List get toolCalls { + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_toolCalls); + } @override String toString() { - return 'MessageDeltaContentTextAnnotations.fileCitation(index: $index, type: $type, text: $text, fileCitation: $fileCitation, startIndex: $startIndex, endIndex: $endIndex)'; + return 'RunStepDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$RunStepDetailsToolCallsObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text) && - (identical(other.fileCitation, fileCitation) || - other.fileCitation == fileCitation) && - (identical(other.startIndex, startIndex) || - other.startIndex == startIndex) && - (identical(other.endIndex, endIndex) || - other.endIndex == endIndex)); + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( - runtimeType, index, type, text, fileCitation, startIndex, endIndex); + runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> - get copyWith => - __$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWithImpl< - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl>( - this, _$identity); + _$$RunStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDetailsToolCallsObjectImpl> + get copyWith => __$$RunStepDetailsToolCallsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) - fileCitation, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation) + messageCreation, required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? 
endIndex) - filePath, + @JsonKey(name: 'tool_calls') + List toolCalls) + toolCalls, }) { - return fileCitation( - index, type, text, this.fileCitation, startIndex, endIndex); + return toolCalls(type, this.toolCalls); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - fileCitation, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - filePath, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, }) { - return fileCitation?.call( - index, type, text, this.fileCitation, startIndex, endIndex); + return toolCalls?.call(type, this.toolCalls); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - fileCitation, + @JsonKey(name: 'message_creation') + RunStepDetailsMessageCreation messageCreation)? + messageCreation, TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - filePath, + @JsonKey(name: 'tool_calls') + List toolCalls)? + toolCalls, required TResult orElse(), }) { - if (fileCitation != null) { - return fileCitation( - index, type, text, this.fileCitation, startIndex, endIndex); + if (toolCalls != null) { + return toolCalls(type, this.toolCalls); } return orElse(); } @@ -56178,350 +64751,373 @@ class _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value) - fileCitation, - required TResult Function( - MessageDeltaContentTextAnnotationsFilePathObject value) - filePath, + required TResult Function(RunStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, }) { - return fileCitation(this); + return toolCalls(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? - filePath, + TResult? Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDetailsToolCallsObject value)? 
toolCalls, }) { - return fileCitation?.call(this); + return toolCalls?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? - filePath, + TResult Function(RunStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, required TResult orElse(), }) { - if (fileCitation != null) { - return fileCitation(this); + if (toolCalls != null) { + return toolCalls(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplToJson( + return _$$RunStepDetailsToolCallsObjectImplToJson( this, ); } } -abstract class MessageDeltaContentTextAnnotationsFileCitationObject - extends MessageDeltaContentTextAnnotations { - const factory MessageDeltaContentTextAnnotationsFileCitationObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) - final int? - endIndex}) = _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl; - const MessageDeltaContentTextAnnotationsFileCitationObject._() : super._(); +abstract class RunStepDetailsToolCallsObject extends RunStepDetails { + const factory RunStepDetailsToolCallsObject( + {required final String type, + @JsonKey(name: 'tool_calls') + required final List toolCalls}) = + _$RunStepDetailsToolCallsObjectImpl; + const RunStepDetailsToolCallsObject._() : super._(); - factory MessageDeltaContentTextAnnotationsFileCitationObject.fromJson( - Map json) = - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl.fromJson; + factory RunStepDetailsToolCallsObject.fromJson(Map json) = + _$RunStepDetailsToolCallsObjectImpl.fromJson; + /// Always `tool_calls`. @override + String get type; - /// The index of the annotation in the text content part. - int get index; - @override + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + @JsonKey(name: 'tool_calls') + List get toolCalls; - /// Always `file_citation`. - String get type; + /// Create a copy of RunStepDetails + /// with the given fields replaced by the non-null parameter values. @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDetailsToolCallsObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} - /// The text in the message content that needs to be replaced. - @JsonKey(includeIfNull: false) - String? get text; +RunStepDeltaDetails _$RunStepDeltaDetailsFromJson(Map json) { + switch (json['type']) { + case 'message_creation': + return RunStepDeltaStepDetailsMessageCreationObject.fromJson(json); + case 'tool_calls': + return RunStepDeltaStepDetailsToolCallsObject.fromJson(json); - /// A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? 
get fileCitation; - @override + default: + throw CheckedFromJsonException(json, 'type', 'RunStepDeltaDetails', + 'Invalid union type "${json['type']}"!'); + } +} - /// The start index of the text in the message content that needs to be replaced. - @JsonKey(name: 'start_index', includeIfNull: false) - int? get startIndex; - @override +/// @nodoc +mixin _$RunStepDeltaDetails { + /// Always `message_creation`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation) + messageCreation, + required TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls) + toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult? Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function( + RunStepDeltaStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) + toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; - /// The end index of the text in the message content that needs to be replaced. - @JsonKey(name: 'end_index', includeIfNull: false) - int? get endIndex; + /// Serializes this RunStepDeltaDetails to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDeltaDetailsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDeltaDetailsCopyWith<$Res> { + factory $RunStepDeltaDetailsCopyWith( + RunStepDeltaDetails value, $Res Function(RunStepDeltaDetails) then) = + _$RunStepDeltaDetailsCopyWithImpl<$Res, RunStepDeltaDetails>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$RunStepDeltaDetailsCopyWithImpl<$Res, $Val extends RunStepDeltaDetails> + implements $RunStepDeltaDetailsCopyWith<$Res> { + _$RunStepDeltaDetailsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(ignore: true) - _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< - $Res> implements $MessageDeltaContentTextAnnotationsCopyWith<$Res> { - factory _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith( - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl value, - $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) +abstract class _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> + implements $RunStepDeltaDetailsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith( + _$RunStepDeltaStepDetailsMessageCreationObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) then) = - __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< - $Res>; + __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex}); + {String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation}); - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? - get filePath; + $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? 
get messageCreation; } /// @nodoc -class __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl<$Res> - extends _$MessageDeltaContentTextAnnotationsCopyWithImpl<$Res, - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> +class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaDetailsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsMessageCreationObjectImpl> implements - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith<$Res> { - __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl( - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _value, - $Res Function(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl) - _then) + _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsMessageCreationObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? text = freezed, - Object? filePath = freezed, - Object? startIndex = freezed, - Object? endIndex = freezed, + Object? messageCreation = freezed, }) { - return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$RunStepDeltaStepDetailsMessageCreationObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - text: freezed == text - ? _value.text - : text // ignore: cast_nullable_to_non_nullable - as String?, - filePath: freezed == filePath - ? _value.filePath - : filePath // ignore: cast_nullable_to_non_nullable - as MessageDeltaContentTextAnnotationsFilePathObjectFilePath?, - startIndex: freezed == startIndex - ? _value.startIndex - : startIndex // ignore: cast_nullable_to_non_nullable - as int?, - endIndex: freezed == endIndex - ? _value.endIndex - : endIndex // ignore: cast_nullable_to_non_nullable - as int?, + messageCreation: freezed == messageCreation + ? _value.messageCreation + : messageCreation // ignore: cast_nullable_to_non_nullable + as RunStepDeltaStepDetailsMessageCreation?, )); } + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith<$Res>? - get filePath { - if (_value.filePath == null) { + $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? 
get messageCreation { + if (_value.messageCreation == null) { return null; } - return $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res>(_value.filePath!, (value) { - return _then(_value.copyWith(filePath: value)); + return $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>( + _value.messageCreation!, (value) { + return _then(_value.copyWith(messageCreation: value)); }); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl - extends MessageDeltaContentTextAnnotationsFilePathObject { - const _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.text, - @JsonKey(name: 'file_path', includeIfNull: false) this.filePath, - @JsonKey(name: 'start_index', includeIfNull: false) this.startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) this.endIndex}) +class _$RunStepDeltaStepDetailsMessageCreationObjectImpl + extends RunStepDeltaStepDetailsMessageCreationObject { + const _$RunStepDeltaStepDetailsMessageCreationObjectImpl( + {required this.type, + @JsonKey(name: 'message_creation', includeIfNull: false) + this.messageCreation}) : super._(); - factory _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson(json); - - /// The index of the annotation in the text content part. - @override - final int index; + _$$RunStepDeltaStepDetailsMessageCreationObjectImplFromJson(json); - /// Always `file_path`. + /// Always `message_creation`. @override final String type; - /// The text in the message content that needs to be replaced. - @override - @JsonKey(includeIfNull: false) - final String? text; - - /// No Description - @override - @JsonKey(name: 'file_path', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath; - - /// No Description - @override - @JsonKey(name: 'start_index', includeIfNull: false) - final int? startIndex; - - /// No Description + /// Details of the message creation by the run step. @override - @JsonKey(name: 'end_index', includeIfNull: false) - final int? endIndex; + @JsonKey(name: 'message_creation', includeIfNull: false) + final RunStepDeltaStepDetailsMessageCreation? 
messageCreation; @override String toString() { - return 'MessageDeltaContentTextAnnotations.filePath(index: $index, type: $type, text: $text, filePath: $filePath, startIndex: $startIndex, endIndex: $endIndex)'; + return 'RunStepDeltaDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$MessageDeltaContentTextAnnotationsFilePathObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$RunStepDeltaStepDetailsMessageCreationObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.text, text) || other.text == text) && - (identical(other.filePath, filePath) || - other.filePath == filePath) && - (identical(other.startIndex, startIndex) || - other.startIndex == startIndex) && - (identical(other.endIndex, endIndex) || - other.endIndex == endIndex)); + (identical(other.messageCreation, messageCreation) || + other.messageCreation == messageCreation)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, index, type, text, filePath, startIndex, endIndex); + int get hashCode => Object.hash(runtimeType, type, messageCreation); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> + _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDeltaStepDetailsMessageCreationObjectImpl> get copyWith => - __$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWithImpl< - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl>( + __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsMessageCreationObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) - fileCitation, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation) + messageCreation, required TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex) - filePath, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls) + toolCalls, }) { - return filePath(index, type, text, this.filePath, startIndex, endIndex); + return messageCreation(type, this.messageCreation); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? 
fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - fileCitation, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, TResult? Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - filePath, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, }) { - return filePath?.call( - index, type, text, this.filePath, startIndex, endIndex); + return messageCreation?.call(type, this.messageCreation); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_citation', includeIfNull: false) - MessageDeltaContentTextAnnotationsFileCitation? fileCitation, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - fileCitation, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, TResult Function( - int index, String type, - @JsonKey(includeIfNull: false) String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) int? endIndex)? - filePath, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, required TResult orElse(), }) { - if (filePath != null) { - return filePath(index, type, text, this.filePath, startIndex, endIndex); + if (messageCreation != null) { + return messageCreation(type, this.messageCreation); } return orElse(); } @@ -56530,405 +65126,466 @@ class _$MessageDeltaContentTextAnnotationsFilePathObjectImpl @optionalTypeArgs TResult map({ required TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value) - fileCitation, - required TResult Function( - MessageDeltaContentTextAnnotationsFilePathObject value) - filePath, + RunStepDeltaStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) + toolCalls, }) { - return filePath(this); + return messageCreation(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult? Function(MessageDeltaContentTextAnnotationsFilePathObject value)? - filePath, + TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, }) { - return filePath?.call(this); + return messageCreation?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - MessageDeltaContentTextAnnotationsFileCitationObject value)? - fileCitation, - TResult Function(MessageDeltaContentTextAnnotationsFilePathObject value)? - filePath, + TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? 
+ messageCreation, + TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, required TResult orElse(), }) { - if (filePath != null) { - return filePath(this); + if (messageCreation != null) { + return messageCreation(this); } return orElse(); } @override Map toJson() { - return _$$MessageDeltaContentTextAnnotationsFilePathObjectImplToJson( + return _$$RunStepDeltaStepDetailsMessageCreationObjectImplToJson( this, ); } } -abstract class MessageDeltaContentTextAnnotationsFilePathObject - extends MessageDeltaContentTextAnnotations { - const factory MessageDeltaContentTextAnnotationsFilePathObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final String? text, - @JsonKey(name: 'file_path', includeIfNull: false) - final MessageDeltaContentTextAnnotationsFilePathObjectFilePath? filePath, - @JsonKey(name: 'start_index', includeIfNull: false) final int? startIndex, - @JsonKey(name: 'end_index', includeIfNull: false) - final int? - endIndex}) = _$MessageDeltaContentTextAnnotationsFilePathObjectImpl; - const MessageDeltaContentTextAnnotationsFilePathObject._() : super._(); +abstract class RunStepDeltaStepDetailsMessageCreationObject + extends RunStepDeltaDetails { + const factory RunStepDeltaStepDetailsMessageCreationObject( + {required final String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + final RunStepDeltaStepDetailsMessageCreation? messageCreation}) = + _$RunStepDeltaStepDetailsMessageCreationObjectImpl; + const RunStepDeltaStepDetailsMessageCreationObject._() : super._(); - factory MessageDeltaContentTextAnnotationsFilePathObject.fromJson( + factory RunStepDeltaStepDetailsMessageCreationObject.fromJson( Map json) = - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl.fromJson; - - @override + _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson; - /// The index of the annotation in the text content part. - int get index; + /// Always `message_creation`. @override - - /// Always `file_path`. String get type; - @override - - /// The text in the message content that needs to be replaced. - @JsonKey(includeIfNull: false) - String? get text; - - /// No Description - @JsonKey(name: 'file_path', includeIfNull: false) - MessageDeltaContentTextAnnotationsFilePathObjectFilePath? get filePath; - @override - /// No Description - @JsonKey(name: 'start_index', includeIfNull: false) - int? get startIndex; - @override + /// Details of the message creation by the run step. + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? get messageCreation; - /// No Description - @JsonKey(name: 'end_index', includeIfNull: false) - int? get endIndex; + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$MessageDeltaContentTextAnnotationsFilePathObjectImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -MessageDeltaContentTextAnnotationsFilePathObjectFilePath - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathFromJson( - Map json) { - return _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( - json); -} - -/// @nodoc -mixin _$MessageDeltaContentTextAnnotationsFilePathObjectFilePath { - /// The ID of the file that was generated. - @JsonKey(name: 'file_id', includeIfNull: false) - String? 
get fileId => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - MessageDeltaContentTextAnnotationsFilePathObjectFilePath> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< + _$RunStepDeltaStepDetailsMessageCreationObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res> { - factory $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith( - MessageDeltaContentTextAnnotationsFilePathObjectFilePath value, - $Res Function( - MessageDeltaContentTextAnnotationsFilePathObjectFilePath) - then) = - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< - $Res, MessageDeltaContentTextAnnotationsFilePathObjectFilePath>; - @useResult - $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); -} - -/// @nodoc -class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< - $Res, - $Val extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath> - implements - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res> { - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl( - this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? fileId = freezed, - }) { - return _then(_value.copyWith( - fileId: freezed == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - $Res> - implements - $MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWith< - $Res> { - factory _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl value, - $Res Function( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) - then) = - __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< - $Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> + implements $RunStepDeltaDetailsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) then) = + __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({@JsonKey(name: 'file_id', includeIfNull: false) String? fileId}); + $Res call( + {String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? 
toolCalls}); } /// @nodoc -class __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< - $Res> - extends _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathCopyWithImpl< - $Res, _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> - implements - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - $Res> { - __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl _value, - $Res Function( - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl) - _then) +class __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaDetailsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsObjectImpl> + implements _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? fileId = freezed, + Object? type = null, + Object? toolCalls = freezed, }) { - return _then(_$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( - fileId: freezed == fileId - ? _value.fileId - : fileId // ignore: cast_nullable_to_non_nullable - as String?, + return _then(_$RunStepDeltaStepDetailsToolCallsObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + toolCalls: freezed == toolCalls + ? _value._toolCalls + : toolCalls // ignore: cast_nullable_to_non_nullable + as List?, )); } } /// @nodoc @JsonSerializable() -class _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl - extends _MessageDeltaContentTextAnnotationsFilePathObjectFilePath { - const _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl( - {@JsonKey(name: 'file_id', includeIfNull: false) this.fileId}) - : super._(); +class _$RunStepDeltaStepDetailsToolCallsObjectImpl + extends RunStepDeltaStepDetailsToolCallsObject { + const _$RunStepDeltaStepDetailsToolCallsObjectImpl( + {required this.type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) + : _toolCalls = toolCalls, + super._(); - factory _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson( Map json) => - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplFromJson( - json); + _$$RunStepDeltaStepDetailsToolCallsObjectImplFromJson(json); - /// The ID of the file that was generated. + /// Always `tool_calls`. @override - @JsonKey(name: 'file_id', includeIfNull: false) - final String? fileId; + final String type; + + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + final List? _toolCalls; + + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + @override + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? 
get toolCalls { + final value = _toolCalls; + if (value == null) return null; + if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } @override String toString() { - return 'MessageDeltaContentTextAnnotationsFilePathObjectFilePath(fileId: $fileId)'; + return 'RunStepDeltaDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDeltaStepDetailsToolCallsObjectImpl && + (identical(other.type, type) || other.type == type) && + const DeepCollectionEquality() + .equals(other._toolCalls, _toolCalls)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsObjectImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation) + messageCreation, + required TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls) + toolCalls, + }) { + return toolCalls(type, this.toolCalls); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult? Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + }) { + return toolCalls?.call(type, this.toolCalls); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function( + String type, + @JsonKey(name: 'message_creation', includeIfNull: false) + RunStepDeltaStepDetailsMessageCreation? messageCreation)? + messageCreation, + TResult Function( + String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? toolCalls)? + toolCalls, + required TResult orElse(), + }) { + if (toolCalls != null) { + return toolCalls(type, this.toolCalls); + } + return orElse(); } @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other - is _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl && - (identical(other.fileId, fileId) || other.fileId == fileId)); + @optionalTypeArgs + TResult map({ + required TResult Function( + RunStepDeltaStepDetailsMessageCreationObject value) + messageCreation, + required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) + toolCalls, + }) { + return toolCalls(this); } - @JsonKey(ignore: true) @override - int get hashCode => Object.hash(runtimeType, fileId); + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? 
toolCalls, + }) { + return toolCalls?.call(this); + } - @JsonKey(ignore: true) @override - @pragma('vm:prefer-inline') - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> - get copyWith => - __$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWithImpl< - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl>( - this, _$identity); + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? + messageCreation, + TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + required TResult orElse(), + }) { + if (toolCalls != null) { + return toolCalls(this); + } + return orElse(); + } @override Map toJson() { - return _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsObjectImplToJson( this, ); } } -abstract class _MessageDeltaContentTextAnnotationsFilePathObjectFilePath - extends MessageDeltaContentTextAnnotationsFilePathObjectFilePath { - const factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath( - {@JsonKey(name: 'file_id', includeIfNull: false) - final String? fileId}) = - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl; - const _MessageDeltaContentTextAnnotationsFilePathObjectFilePath._() - : super._(); +abstract class RunStepDeltaStepDetailsToolCallsObject + extends RunStepDeltaDetails { + const factory RunStepDeltaStepDetailsToolCallsObject( + {required final String type, + @JsonKey(name: 'tool_calls', includeIfNull: false) + final List? toolCalls}) = + _$RunStepDeltaStepDetailsToolCallsObjectImpl; + const RunStepDeltaStepDetailsToolCallsObject._() : super._(); - factory _MessageDeltaContentTextAnnotationsFilePathObjectFilePath.fromJson( + factory RunStepDeltaStepDetailsToolCallsObject.fromJson( Map json) = - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson; + /// Always `tool_calls`. @override + String get type; - /// The ID of the file that was generated. - @JsonKey(name: 'file_id', includeIfNull: false) - String? get fileId; + /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + @JsonKey(name: 'tool_calls', includeIfNull: false) + List? get toolCalls; + + /// Create a copy of RunStepDeltaDetails + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImplCopyWith< - _$MessageDeltaContentTextAnnotationsFilePathObjectFilePathImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsObjectImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetails _$RunStepDetailsFromJson(Map json) { +RunStepDetailsToolCalls _$RunStepDetailsToolCallsFromJson( + Map json) { switch (json['type']) { - case 'message_creation': - return RunStepDetailsMessageCreationObject.fromJson(json); - case 'tool_calls': - return RunStepDetailsToolCallsObject.fromJson(json); + case 'code_interpreter': + return RunStepDetailsToolCallsCodeObject.fromJson(json); + case 'file_search': + return RunStepDetailsToolCallsFileSearchObject.fromJson(json); + case 'function': + return RunStepDetailsToolCallsFunctionObject.fromJson(json); default: - throw CheckedFromJsonException(json, 'type', 'RunStepDetails', + throw CheckedFromJsonException(json, 'type', 'RunStepDetailsToolCalls', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDetails { - /// Always `message_creation`. +mixin _$RunStepDetailsToolCalls { + /// The ID of the tool call. + String get id => throw _privateConstructorUsedError; + + /// Always `code_interpreter`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation) - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + codeInterpreter, required TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls) - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) + fileSearch, + required TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function) + function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult? Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult? Function( + String id, String type, RunStepDetailsToolCallsFunction function)? + function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function)? 
+ function, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, + required TResult Function(RunStepDetailsToolCallsCodeObject value) + codeInterpreter, + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + fileSearch, + required TResult Function(RunStepDetailsToolCallsFunctionObject value) + function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDetailsCopyWith get copyWith => + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsCopyWith<$Res> { - factory $RunStepDetailsCopyWith( - RunStepDetails value, $Res Function(RunStepDetails) then) = - _$RunStepDetailsCopyWithImpl<$Res, RunStepDetails>; +abstract class $RunStepDetailsToolCallsCopyWith<$Res> { + factory $RunStepDetailsToolCallsCopyWith(RunStepDetailsToolCalls value, + $Res Function(RunStepDetailsToolCalls) then) = + _$RunStepDetailsToolCallsCopyWithImpl<$Res, RunStepDetailsToolCalls>; @useResult - $Res call({String type}); + $Res call({String id, String type}); } /// @nodoc -class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> - implements $RunStepDetailsCopyWith<$Res> { - _$RunStepDetailsCopyWithImpl(this._value, this._then); +class _$RunStepDetailsToolCallsCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCalls> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + _$RunStepDetailsToolCallsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? id = null, Object? type = null, }) { return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable @@ -56938,160 +65595,195 @@ class _$RunStepDetailsCopyWithImpl<$Res, $Val extends RunStepDetails> } /// @nodoc -abstract class _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> - implements $RunStepDetailsCopyWith<$Res> { - factory _$$RunStepDetailsMessageCreationObjectImplCopyWith( - _$RunStepDetailsMessageCreationObjectImpl value, - $Res Function(_$RunStepDetailsMessageCreationObjectImpl) then) = - __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsCodeObjectImplCopyWith( + _$RunStepDetailsToolCallsCodeObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) then) = + __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation}); + {String id, + String type, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter}); - $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation; + $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> + get codeInterpreter; } /// @nodoc -class __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsCopyWithImpl<$Res, - _$RunStepDetailsMessageCreationObjectImpl> - implements _$$RunStepDetailsMessageCreationObjectImplCopyWith<$Res> { - __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl( - _$RunStepDetailsMessageCreationObjectImpl _value, - $Res Function(_$RunStepDetailsMessageCreationObjectImpl) _then) +class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsCodeObjectImpl> + implements _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsCodeObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? id = null, Object? type = null, - Object? messageCreation = null, + Object? codeInterpreter = null, }) { - return _then(_$RunStepDetailsMessageCreationObjectImpl( + return _then(_$RunStepDetailsToolCallsCodeObjectImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - messageCreation: null == messageCreation - ? _value.messageCreation - : messageCreation // ignore: cast_nullable_to_non_nullable - as RunStepDetailsMessageCreation, + codeInterpreter: null == codeInterpreter + ? _value.codeInterpreter + : codeInterpreter // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsCodeObjectCodeInterpreter, )); } + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $RunStepDetailsMessageCreationCopyWith<$Res> get messageCreation { - return $RunStepDetailsMessageCreationCopyWith<$Res>(_value.messageCreation, - (value) { - return _then(_value.copyWith(messageCreation: value)); + $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> + get codeInterpreter { + return $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>( + _value.codeInterpreter, (value) { + return _then(_value.copyWith(codeInterpreter: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsMessageCreationObjectImpl - extends RunStepDetailsMessageCreationObject { - const _$RunStepDetailsMessageCreationObjectImpl( - {required this.type, - @JsonKey(name: 'message_creation') required this.messageCreation}) +class _$RunStepDetailsToolCallsCodeObjectImpl + extends RunStepDetailsToolCallsCodeObject { + const _$RunStepDetailsToolCallsCodeObjectImpl( + {required this.id, + required this.type, + @JsonKey(name: 'code_interpreter') required this.codeInterpreter}) : super._(); - factory _$RunStepDetailsMessageCreationObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsCodeObjectImpl.fromJson( Map json) => - _$$RunStepDetailsMessageCreationObjectImplFromJson(json); + _$$RunStepDetailsToolCallsCodeObjectImplFromJson(json); - /// Always `message_creation`. + /// The ID of the tool call. + @override + final String id; + + /// Always `code_interpreter`. @override final String type; - /// Details of the message creation by the run step. + /// The Code Interpreter tool call definition. @override - @JsonKey(name: 'message_creation') - final RunStepDetailsMessageCreation messageCreation; + @JsonKey(name: 'code_interpreter') + final RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter; @override String toString() { - return 'RunStepDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; + return 'RunStepDetailsToolCalls.codeInterpreter(id: $id, type: $type, codeInterpreter: $codeInterpreter)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsMessageCreationObjectImpl && + other is _$RunStepDetailsToolCallsCodeObjectImpl && + (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - (identical(other.messageCreation, messageCreation) || - other.messageCreation == messageCreation)); + (identical(other.codeInterpreter, codeInterpreter) || + other.codeInterpreter == codeInterpreter)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, messageCreation); + int get hashCode => Object.hash(runtimeType, id, type, codeInterpreter); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDetailsMessageCreationObjectImpl> - get copyWith => __$$RunStepDetailsMessageCreationObjectImplCopyWithImpl< - _$RunStepDetailsMessageCreationObjectImpl>(this, _$identity); + _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeObjectImpl> + get copyWith => __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsCodeObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation) - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + codeInterpreter, required TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls) - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) + fileSearch, + required TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function) + function, }) { - return messageCreation(type, this.messageCreation); + return codeInterpreter(id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult? Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult? Function( + String id, String type, RunStepDetailsToolCallsFunction function)? + function, }) { - return messageCreation?.call(type, this.messageCreation); + return codeInterpreter?.call(id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function)? 
+ function, required TResult orElse(), }) { - if (messageCreation != null) { - return messageCreation(type, this.messageCreation); + if (codeInterpreter != null) { + return codeInterpreter(id, type, this.codeInterpreter); } return orElse(); } @@ -57099,225 +65791,273 @@ class _$RunStepDetailsMessageCreationObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, + required TResult Function(RunStepDetailsToolCallsCodeObject value) + codeInterpreter, + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + fileSearch, + required TResult Function(RunStepDetailsToolCallsFunctionObject value) + function, }) { - return messageCreation(this); + return codeInterpreter(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) { - return messageCreation?.call(this); + return codeInterpreter?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, required TResult orElse(), }) { - if (messageCreation != null) { - return messageCreation(this); + if (codeInterpreter != null) { + return codeInterpreter(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsMessageCreationObjectImplToJson( + return _$$RunStepDetailsToolCallsCodeObjectImplToJson( this, ); } } -abstract class RunStepDetailsMessageCreationObject extends RunStepDetails { - const factory RunStepDetailsMessageCreationObject( - {required final String type, - @JsonKey(name: 'message_creation') - required final RunStepDetailsMessageCreation messageCreation}) = - _$RunStepDetailsMessageCreationObjectImpl; - const RunStepDetailsMessageCreationObject._() : super._(); +abstract class RunStepDetailsToolCallsCodeObject + extends RunStepDetailsToolCalls { + const factory RunStepDetailsToolCallsCodeObject( + {required final String id, + required final String type, + @JsonKey(name: 'code_interpreter') + required final RunStepDetailsToolCallsCodeObjectCodeInterpreter + codeInterpreter}) = _$RunStepDetailsToolCallsCodeObjectImpl; + const RunStepDetailsToolCallsCodeObject._() : super._(); - factory RunStepDetailsMessageCreationObject.fromJson( + factory RunStepDetailsToolCallsCodeObject.fromJson( Map json) = - _$RunStepDetailsMessageCreationObjectImpl.fromJson; + _$RunStepDetailsToolCallsCodeObjectImpl.fromJson; + /// The ID of the tool call. @override + String get id; - /// Always `message_creation`. + /// Always `code_interpreter`. + @override String get type; - /// Details of the message creation by the run step. - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation get messageCreation; + /// The Code Interpreter tool call definition. 
+ @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter get codeInterpreter; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDetailsMessageCreationObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> - implements $RunStepDetailsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsObjectImplCopyWith( - _$RunStepDetailsToolCallsObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsObjectImpl) then) = - __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith( + _$RunStepDetailsToolCallsFileSearchObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) then) = + __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String type, - @JsonKey(name: 'tool_calls') List toolCalls}); + {String id, + String type, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch}); + + $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch; } /// @nodoc -class __$$RunStepDetailsToolCallsObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsObjectImpl> - implements _$$RunStepDetailsToolCallsObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsObjectImpl) _then) +class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFileSearchObjectImpl> + implements _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFileSearchObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? id = null, Object? type = null, - Object? toolCalls = null, + Object? fileSearch = null, }) { - return _then(_$RunStepDetailsToolCallsObjectImpl( + return _then(_$RunStepDetailsToolCallsFileSearchObjectImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - toolCalls: null == toolCalls - ? _value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List, + fileSearch: null == fileSearch + ? _value.fileSearch + : fileSearch // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFileSearch, )); } + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $RunStepDetailsToolCallsFileSearchCopyWith<$Res> get fileSearch { + return $RunStepDetailsToolCallsFileSearchCopyWith<$Res>(_value.fileSearch, + (value) { + return _then(_value.copyWith(fileSearch: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsObjectImpl - extends RunStepDetailsToolCallsObject { - const _$RunStepDetailsToolCallsObjectImpl( - {required this.type, - @JsonKey(name: 'tool_calls') - required final List toolCalls}) - : _toolCalls = toolCalls, - super._(); +class _$RunStepDetailsToolCallsFileSearchObjectImpl + extends RunStepDetailsToolCallsFileSearchObject { + const _$RunStepDetailsToolCallsFileSearchObjectImpl( + {required this.id, + required this.type, + @JsonKey(name: 'file_search') required this.fileSearch}) + : super._(); - factory _$RunStepDetailsToolCallsObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsObjectImplFromJson(json); + _$$RunStepDetailsToolCallsFileSearchObjectImplFromJson(json); - /// Always `tool_calls`. + /// The ID of the tool call object. @override - final String type; + final String id; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - final List _toolCalls; + /// The type of tool call. This is always going to be `file_search` for this type of tool call. + @override + final String type; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + /// The definition of the file search that was called. @override - @JsonKey(name: 'tool_calls') - List get toolCalls { - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_toolCalls); - } + @JsonKey(name: 'file_search') + final RunStepDetailsToolCallsFileSearch fileSearch; @override String toString() { - return 'RunStepDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; + return 'RunStepDetailsToolCalls.fileSearch(id: $id, type: $type, fileSearch: $fileSearch)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsObjectImpl && + other is _$RunStepDetailsToolCallsFileSearchObjectImpl && + (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls)); + (identical(other.fileSearch, fileSearch) || + other.fileSearch == fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); + int get hashCode => Object.hash(runtimeType, id, type, fileSearch); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDetailsToolCallsObjectImpl> - get copyWith => __$$RunStepDetailsToolCallsObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsObjectImpl>(this, _$identity); + _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFileSearchObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation) - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + codeInterpreter, required TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls) - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) + fileSearch, + required TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function) + function, }) { - return toolCalls(type, this.toolCalls); + return fileSearch(id, type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult? Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult? Function( + String id, String type, RunStepDetailsToolCallsFunction function)? + function, }) { - return toolCalls?.call(type, this.toolCalls); + return fileSearch?.call(id, type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + String id, String type, - @JsonKey(name: 'message_creation') - RunStepDetailsMessageCreation messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls') - List toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function)? + function, required TResult orElse(), }) { - if (toolCalls != null) { - return toolCalls(type, this.toolCalls); + if (fileSearch != null) { + return fileSearch(id, type, this.fileSearch); } return orElse(); } @@ -57325,358 +66065,265 @@ class _$RunStepDetailsToolCallsObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDetailsToolCallsObject value) toolCalls, + required TResult Function(RunStepDetailsToolCallsCodeObject value) + codeInterpreter, + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + fileSearch, + required TResult Function(RunStepDetailsToolCallsFunctionObject value) + function, }) { - return toolCalls(this); + return fileSearch(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? 
Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) { - return toolCalls?.call(this); + return fileSearch?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDetailsToolCallsObject value)? toolCalls, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, required TResult orElse(), }) { - if (toolCalls != null) { - return toolCalls(this); + if (fileSearch != null) { + return fileSearch(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsObjectImplToJson( + return _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsObject extends RunStepDetails { - const factory RunStepDetailsToolCallsObject( - {required final String type, - @JsonKey(name: 'tool_calls') - required final List toolCalls}) = - _$RunStepDetailsToolCallsObjectImpl; - const RunStepDetailsToolCallsObject._() : super._(); +abstract class RunStepDetailsToolCallsFileSearchObject + extends RunStepDetailsToolCalls { + const factory RunStepDetailsToolCallsFileSearchObject( + {required final String id, + required final String type, + @JsonKey(name: 'file_search') + required final RunStepDetailsToolCallsFileSearch fileSearch}) = + _$RunStepDetailsToolCallsFileSearchObjectImpl; + const RunStepDetailsToolCallsFileSearchObject._() : super._(); - factory RunStepDetailsToolCallsObject.fromJson(Map json) = - _$RunStepDetailsToolCallsObjectImpl.fromJson; + factory RunStepDetailsToolCallsFileSearchObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson; + /// The ID of the tool call object. @override + String get id; - /// Always `tool_calls`. - String get type; - - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - @JsonKey(name: 'tool_calls') - List get toolCalls; + /// The type of tool call. This is always going to be `file_search` for this type of tool call. @override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDetailsToolCallsObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -RunStepDeltaDetails _$RunStepDeltaDetailsFromJson(Map json) { - switch (json['type']) { - case 'message_creation': - return RunStepDeltaStepDetailsMessageCreationObject.fromJson(json); - case 'tool_calls': - return RunStepDeltaStepDetailsToolCallsObject.fromJson(json); - - default: - throw CheckedFromJsonException(json, 'type', 'RunStepDeltaDetails', - 'Invalid union type "${json['type']}"!'); - } -} - -/// @nodoc -mixin _$RunStepDeltaDetails { - /// Always `message_creation`. - String get type => throw _privateConstructorUsedError; - @optionalTypeArgs - TResult when({ - required TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? 
messageCreation) - messageCreation, - required TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls) - toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult? Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult map({ - required TResult Function( - RunStepDeltaStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) - toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - }) => - throw _privateConstructorUsedError; - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDeltaDetailsCopyWith get copyWith => - throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $RunStepDeltaDetailsCopyWith<$Res> { - factory $RunStepDeltaDetailsCopyWith( - RunStepDeltaDetails value, $Res Function(RunStepDeltaDetails) then) = - _$RunStepDeltaDetailsCopyWithImpl<$Res, RunStepDeltaDetails>; - @useResult - $Res call({String type}); -} - -/// @nodoc -class _$RunStepDeltaDetailsCopyWithImpl<$Res, $Val extends RunStepDeltaDetails> - implements $RunStepDeltaDetailsCopyWith<$Res> { - _$RunStepDeltaDetailsCopyWithImpl(this._value, this._then); + String get type; - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + /// The definition of the file search that was called. + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch get fileSearch; - @pragma('vm:prefer-inline') + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - $Res call({ - Object? type = null, - }) { - return _then(_value.copyWith( - type: null == type - ? 
_value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDetailsToolCallsFileSearchObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> - implements $RunStepDeltaDetailsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith( - _$RunStepDeltaStepDetailsMessageCreationObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) - then) = - __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith( + _$RunStepDetailsToolCallsFunctionObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) then) = + __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation}); + $Res call({String id, String type, RunStepDetailsToolCallsFunction function}); - $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation; + $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function; } /// @nodoc -class __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaDetailsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsMessageCreationObjectImpl> - implements - _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsMessageCreationObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsMessageCreationObjectImpl) _then) +class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFunctionObjectImpl> + implements _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsFunctionObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ + Object? id = null, Object? type = null, - Object? messageCreation = freezed, + Object? function = null, }) { - return _then(_$RunStepDeltaStepDetailsMessageCreationObjectImpl( + return _then(_$RunStepDetailsToolCallsFunctionObjectImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - messageCreation: freezed == messageCreation - ? _value.messageCreation - : messageCreation // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsMessageCreation?, + function: null == function + ? _value.function + : function // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsFunction, )); } + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>? get messageCreation { - if (_value.messageCreation == null) { - return null; - } - - return $RunStepDeltaStepDetailsMessageCreationCopyWith<$Res>( - _value.messageCreation!, (value) { - return _then(_value.copyWith(messageCreation: value)); + $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function { + return $RunStepDetailsToolCallsFunctionCopyWith<$Res>(_value.function, + (value) { + return _then(_value.copyWith(function: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsMessageCreationObjectImpl - extends RunStepDeltaStepDetailsMessageCreationObject { - const _$RunStepDeltaStepDetailsMessageCreationObjectImpl( - {required this.type, - @JsonKey(name: 'message_creation', includeIfNull: false) - this.messageCreation}) +class _$RunStepDetailsToolCallsFunctionObjectImpl + extends RunStepDetailsToolCallsFunctionObject { + const _$RunStepDetailsToolCallsFunctionObjectImpl( + {required this.id, required this.type, required this.function}) : super._(); - factory _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsMessageCreationObjectImplFromJson(json); + _$$RunStepDetailsToolCallsFunctionObjectImplFromJson(json); - /// Always `message_creation`. + /// The ID of the tool call object. + @override + final String id; + + /// Always `function`. @override final String type; - /// Details of the message creation by the run step. + /// The definition of the function that was called. @override - @JsonKey(name: 'message_creation', includeIfNull: false) - final RunStepDeltaStepDetailsMessageCreation? messageCreation; + final RunStepDetailsToolCallsFunction function; @override String toString() { - return 'RunStepDeltaDetails.messageCreation(type: $type, messageCreation: $messageCreation)'; + return 'RunStepDetailsToolCalls.function(id: $id, type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsMessageCreationObjectImpl && + other is _$RunStepDetailsToolCallsFunctionObjectImpl && + (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - (identical(other.messageCreation, messageCreation) || - other.messageCreation == messageCreation)); + (identical(other.function, function) || + other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, messageCreation); + int get hashCode => Object.hash(runtimeType, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDeltaStepDetailsMessageCreationObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsMessageCreationObjectImpl>( - this, _$identity); + _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDetailsToolCallsFunctionObjectImpl> + get copyWith => __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsFunctionObjectImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( + String id, String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation) - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + codeInterpreter, required TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls) - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch) + fileSearch, + required TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function) + function, }) { - return messageCreation(type, this.messageCreation); + return function(id, type, this.function); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( + String id, String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult? Function( + String id, String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult? Function( + String id, String type, RunStepDetailsToolCallsFunction function)? + function, }) { - return messageCreation?.call(type, this.messageCreation); + return function?.call(id, type, this.function); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( + String id, String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, + @JsonKey(name: 'code_interpreter') + RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + codeInterpreter, TResult Function( + String id, String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, + @JsonKey(name: 'file_search') + RunStepDetailsToolCallsFileSearch fileSearch)? + fileSearch, + TResult Function( + String id, String type, RunStepDetailsToolCallsFunction function)? 
+ function, required TResult orElse(), }) { - if (messageCreation != null) { - return messageCreation(type, this.messageCreation); + if (function != null) { + return function(id, type, this.function); } return orElse(); } @@ -57684,443 +66331,489 @@ class _$RunStepDeltaStepDetailsMessageCreationObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function( - RunStepDeltaStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) - toolCalls, + required TResult Function(RunStepDetailsToolCallsCodeObject value) + codeInterpreter, + required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + fileSearch, + required TResult Function(RunStepDetailsToolCallsFunctionObject value) + function, }) { - return messageCreation(this); + return function(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, }) { - return messageCreation?.call(this); + return function?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, + TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, + TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, + TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, required TResult orElse(), }) { - if (messageCreation != null) { - return messageCreation(this); + if (function != null) { + return function(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsMessageCreationObjectImplToJson( + return _$$RunStepDetailsToolCallsFunctionObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsMessageCreationObject - extends RunStepDeltaDetails { - const factory RunStepDeltaStepDetailsMessageCreationObject( - {required final String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - final RunStepDeltaStepDetailsMessageCreation? messageCreation}) = - _$RunStepDeltaStepDetailsMessageCreationObjectImpl; - const RunStepDeltaStepDetailsMessageCreationObject._() : super._(); +abstract class RunStepDetailsToolCallsFunctionObject + extends RunStepDetailsToolCalls { + const factory RunStepDetailsToolCallsFunctionObject( + {required final String id, + required final String type, + required final RunStepDetailsToolCallsFunction function}) = + _$RunStepDetailsToolCallsFunctionObjectImpl; + const RunStepDetailsToolCallsFunctionObject._() : super._(); + + factory RunStepDetailsToolCallsFunctionObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson; + + /// The ID of the tool call object. + @override + String get id; + + /// Always `function`. + @override + String get type; + + /// The definition of the function that was called. + RunStepDetailsToolCallsFunction get function; + + /// Create a copy of RunStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDetailsToolCallsFunctionObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDetailsToolCallsFunction _$RunStepDetailsToolCallsFunctionFromJson( + Map json) { + return _RunStepDetailsToolCallsFunction.fromJson(json); +} + +/// @nodoc +mixin _$RunStepDetailsToolCallsFunction { + /// The name of the function. + String get name => throw _privateConstructorUsedError; + + /// The arguments passed to the function. + String get arguments => throw _privateConstructorUsedError; + + /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + String? get output => throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsFunction to a JSON map. + Map toJson() => throw _privateConstructorUsedError; - factory RunStepDeltaStepDetailsMessageCreationObject.fromJson( - Map json) = - _$RunStepDeltaStepDetailsMessageCreationObjectImpl.fromJson; + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsFunctionCopyWith + get copyWith => throw _privateConstructorUsedError; +} - @override +/// @nodoc +abstract class $RunStepDetailsToolCallsFunctionCopyWith<$Res> { + factory $RunStepDetailsToolCallsFunctionCopyWith( + RunStepDetailsToolCallsFunction value, + $Res Function(RunStepDetailsToolCallsFunction) then) = + _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, + RunStepDetailsToolCallsFunction>; + @useResult + $Res call({String name, String arguments, String? output}); +} - /// Always `message_creation`. - String get type; +/// @nodoc +class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsFunction> + implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { + _$RunStepDetailsToolCallsFunctionCopyWithImpl(this._value, this._then); - /// Details of the message creation by the run step. - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? get messageCreation; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsMessageCreationObjectImplCopyWith< - _$RunStepDeltaStepDetailsMessageCreationObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? name = null, + Object? arguments = null, + Object? output = freezed, + }) { + return _then(_value.copyWith( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String, + arguments: null == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as String, + output: freezed == output + ? 
_value.output + : output // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> - implements $RunStepDeltaDetailsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> + implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsFunctionImplCopyWith( + _$RunStepDetailsToolCallsFunctionImpl value, + $Res Function(_$RunStepDetailsToolCallsFunctionImpl) then) = + __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls}); + $Res call({String name, String arguments, String? output}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaDetailsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsObjectImpl> - implements _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsObjectImpl) _then) +class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, + _$RunStepDetailsToolCallsFunctionImpl> + implements _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl( + _$RunStepDetailsToolCallsFunctionImpl _value, + $Res Function(_$RunStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? type = null, - Object? toolCalls = freezed, + Object? name = null, + Object? arguments = null, + Object? output = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsObjectImpl( - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable + return _then(_$RunStepDetailsToolCallsFunctionImpl( + name: null == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable as String, - toolCalls: freezed == toolCalls - ? _value._toolCalls - : toolCalls // ignore: cast_nullable_to_non_nullable - as List?, + arguments: null == arguments + ? _value.arguments + : arguments // ignore: cast_nullable_to_non_nullable + as String, + output: freezed == output + ? _value.output + : output // ignore: cast_nullable_to_non_nullable + as String?, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsObjectImpl - extends RunStepDeltaStepDetailsToolCallsObject { - const _$RunStepDeltaStepDetailsToolCallsObjectImpl( - {required this.type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? 
toolCalls}) - : _toolCalls = toolCalls, - super._(); +class _$RunStepDetailsToolCallsFunctionImpl + extends _RunStepDetailsToolCallsFunction { + const _$RunStepDetailsToolCallsFunctionImpl( + {required this.name, required this.arguments, required this.output}) + : super._(); - factory _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsFunctionImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsObjectImplFromJson(json); + _$$RunStepDetailsToolCallsFunctionImplFromJson(json); - /// Always `tool_calls`. + /// The name of the function. @override - final String type; + final String name; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - final List? _toolCalls; + /// The arguments passed to the function. + @override + final String arguments; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. @override - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls { - final value = _toolCalls; - if (value == null) return null; - if (_toolCalls is EqualUnmodifiableListView) return _toolCalls; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(value); - } + final String? output; @override String toString() { - return 'RunStepDeltaDetails.toolCalls(type: $type, toolCalls: $toolCalls)'; + return 'RunStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsObjectImpl && - (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._toolCalls, _toolCalls)); + other is _$RunStepDetailsToolCallsFunctionImpl && + (identical(other.name, name) || other.name == name) && + (identical(other.arguments, arguments) || + other.arguments == arguments) && + (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, type, const DeepCollectionEquality().hash(_toolCalls)); + int get hashCode => Object.hash(runtimeType, name, arguments, output); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsObjectImpl>(this, _$identity); - - @override - @optionalTypeArgs - TResult when({ - required TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation) - messageCreation, - required TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? 
toolCalls) - toolCalls, - }) { - return toolCalls(type, this.toolCalls); - } - - @override - @optionalTypeArgs - TResult? whenOrNull({ - TResult? Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult? Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - }) { - return toolCalls?.call(type, this.toolCalls); - } - - @override - @optionalTypeArgs - TResult maybeWhen({ - TResult Function( - String type, - @JsonKey(name: 'message_creation', includeIfNull: false) - RunStepDeltaStepDetailsMessageCreation? messageCreation)? - messageCreation, - TResult Function( - String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - List? toolCalls)? - toolCalls, - required TResult orElse(), - }) { - if (toolCalls != null) { - return toolCalls(type, this.toolCalls); - } - return orElse(); - } - - @override - @optionalTypeArgs - TResult map({ - required TResult Function( - RunStepDeltaStepDetailsMessageCreationObject value) - messageCreation, - required TResult Function(RunStepDeltaStepDetailsToolCallsObject value) - toolCalls, - }) { - return toolCalls(this); - } - - @override - @optionalTypeArgs - TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult? Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - }) { - return toolCalls?.call(this); - } - - @override - @optionalTypeArgs - TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsMessageCreationObject value)? - messageCreation, - TResult Function(RunStepDeltaStepDetailsToolCallsObject value)? toolCalls, - required TResult orElse(), - }) { - if (toolCalls != null) { - return toolCalls(this); - } - return orElse(); - } + _$$RunStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDetailsToolCallsFunctionImpl> + get copyWith => __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl< + _$RunStepDetailsToolCallsFunctionImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsObjectImplToJson( + return _$$RunStepDetailsToolCallsFunctionImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsObject - extends RunStepDeltaDetails { - const factory RunStepDeltaStepDetailsToolCallsObject( - {required final String type, - @JsonKey(name: 'tool_calls', includeIfNull: false) - final List? toolCalls}) = - _$RunStepDeltaStepDetailsToolCallsObjectImpl; - const RunStepDeltaStepDetailsToolCallsObject._() : super._(); +abstract class _RunStepDetailsToolCallsFunction + extends RunStepDetailsToolCallsFunction { + const factory _RunStepDetailsToolCallsFunction( + {required final String name, + required final String arguments, + required final String? output}) = _$RunStepDetailsToolCallsFunctionImpl; + const _RunStepDetailsToolCallsFunction._() : super._(); - factory RunStepDeltaStepDetailsToolCallsObject.fromJson( - Map json) = - _$RunStepDeltaStepDetailsToolCallsObjectImpl.fromJson; + factory _RunStepDetailsToolCallsFunction.fromJson(Map json) = + _$RunStepDetailsToolCallsFunctionImpl.fromJson; + /// The name of the function. @override + String get name; - /// Always `tool_calls`. - String get type; + /// The arguments passed to the function. + @override + String get arguments; - /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. 
- @JsonKey(name: 'tool_calls', includeIfNull: false) - List? get toolCalls; + /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsObjectImpl> + String? get output; + + /// Create a copy of RunStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCalls _$RunStepDetailsToolCallsFromJson( +RunStepDeltaStepDetailsToolCalls _$RunStepDeltaStepDetailsToolCallsFromJson( Map json) { switch (json['type']) { case 'code_interpreter': - return RunStepDetailsToolCallsCodeObject.fromJson(json); + return RunStepDeltaStepDetailsToolCallsCodeObject.fromJson(json); case 'file_search': - return RunStepDetailsToolCallsFileSearchObject.fromJson(json); + return RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson(json); case 'function': - return RunStepDetailsToolCallsFunctionObject.fromJson(json); + return RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson(json); default: - throw CheckedFromJsonException(json, 'type', 'RunStepDetailsToolCalls', + throw CheckedFromJsonException( + json, + 'type', + 'RunStepDeltaStepDetailsToolCalls', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDetailsToolCalls { +mixin _$RunStepDeltaStepDetailsToolCalls { + /// The index of the tool call in the tool calls array. + int get index => throw _privateConstructorUsedError; + /// The ID of the tool call. - String get id => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get id => throw _privateConstructorUsedError; /// Always `code_interpreter`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ required TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? 
id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? function, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFunctionObject value) function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this RunStepDeltaStepDetailsToolCalls to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDetailsToolCallsCopyWith get copyWith => - throw _privateConstructorUsedError; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDeltaStepDetailsToolCallsCopyWith + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsCopyWith<$Res> { - factory $RunStepDetailsToolCallsCopyWith(RunStepDetailsToolCalls value, - $Res Function(RunStepDetailsToolCalls) then) = - _$RunStepDetailsToolCallsCopyWithImpl<$Res, RunStepDetailsToolCalls>; +abstract class $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory $RunStepDeltaStepDetailsToolCallsCopyWith( + RunStepDeltaStepDetailsToolCalls value, + $Res Function(RunStepDeltaStepDetailsToolCalls) then) = + _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + RunStepDeltaStepDetailsToolCalls>; @useResult - $Res call({String id, String type}); + $Res call( + {int index, @JsonKey(includeIfNull: false) String? id, String type}); } /// @nodoc -class _$RunStepDetailsToolCallsCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCalls> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - _$RunStepDetailsToolCallsCopyWithImpl(this._value, this._then); +class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + $Val extends RunStepDeltaStepDetailsToolCalls> + implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + _$RunStepDeltaStepDetailsToolCallsCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, + Object? index = null, + Object? id = freezed, Object? type = null, }) { return _then(_value.copyWith( - id: null == id + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + id: freezed == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String, + as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable @@ -58130,63 +66823,79 @@ class _$RunStepDetailsToolCallsCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsCodeObjectImplCopyWith( - _$RunStepDetailsToolCallsCodeObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) then) = - __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> + implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) + then) = + __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; @override @useResult $Res call( - {String id, + {int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter}); + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter}); - $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> + $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? 
get codeInterpreter; } /// @nodoc -class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsCodeObjectImpl> - implements _$$RunStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsCodeObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsCodeObjectImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> + implements _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, + Object? index = null, + Object? id = freezed, Object? type = null, - Object? codeInterpreter = null, + Object? codeInterpreter = freezed, }) { - return _then(_$RunStepDetailsToolCallsCodeObjectImpl( - id: null == id + return _then(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + id: freezed == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String, + as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - codeInterpreter: null == codeInterpreter + codeInterpreter: freezed == codeInterpreter ? _value.codeInterpreter : codeInterpreter // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsCodeObjectCodeInterpreter, + as RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter?, )); } + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res> + $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? 
get codeInterpreter { - return $RunStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>( - _value.codeInterpreter, (value) { + if (_value.codeInterpreter == null) { + return null; + } + + return $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< + $Res>(_value.codeInterpreter!, (value) { return _then(_value.copyWith(codeInterpreter: value)); }); } @@ -58194,116 +66903,157 @@ class __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsCodeObjectImpl - extends RunStepDetailsToolCallsCodeObject { - const _$RunStepDetailsToolCallsCodeObjectImpl( - {required this.id, +class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl + extends RunStepDeltaStepDetailsToolCallsCodeObject { + const _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( + {required this.index, + @JsonKey(includeIfNull: false) this.id, required this.type, - @JsonKey(name: 'code_interpreter') required this.codeInterpreter}) + @JsonKey(name: 'code_interpreter', includeIfNull: false) + this.codeInterpreter}) : super._(); - factory _$RunStepDetailsToolCallsCodeObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsCodeObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson(json); + + /// The index of the tool call in the tool calls array. + @override + final int index; /// The ID of the tool call. @override - final String id; + @JsonKey(includeIfNull: false) + final String? id; /// Always `code_interpreter`. @override final String type; - /// The Code Interpreter tool call definition. + /// The Code Interpreter tool call definition. - outputs @override - @JsonKey(name: 'code_interpreter') - final RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter; + @JsonKey(name: 'code_interpreter', includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter; @override String toString() { - return 'RunStepDetailsToolCalls.codeInterpreter(id: $id, type: $type, codeInterpreter: $codeInterpreter)'; + return 'RunStepDeltaStepDetailsToolCalls.codeInterpreter(index: $index, id: $id, type: $type, codeInterpreter: $codeInterpreter)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsCodeObjectImpl && + other is _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && (identical(other.codeInterpreter, codeInterpreter) || other.codeInterpreter == codeInterpreter)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, id, type, codeInterpreter); + int get hashCode => + Object.hash(runtimeType, index, id, type, codeInterpreter); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeObjectImpl> - get copyWith => __$$RunStepDetailsToolCallsCodeObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsCodeObjectImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function) function, }) { - return codeInterpreter(id, type, this.codeInterpreter); + return codeInterpreter(index, id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? function, }) { - return codeInterpreter?.call(id, type, this.codeInterpreter); + return codeInterpreter?.call(index, id, type, this.codeInterpreter); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? 
function, required TResult orElse(), }) { if (codeInterpreter != null) { - return codeInterpreter(id, type, this.codeInterpreter); + return codeInterpreter(index, id, type, this.codeInterpreter); } return orElse(); } @@ -58311,11 +67061,13 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFunctionObject value) function, }) { return codeInterpreter(this); @@ -58324,10 +67076,12 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, }) { return codeInterpreter?.call(this); } @@ -58335,9 +67089,12 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, required TResult orElse(), }) { if (codeInterpreter != null) { @@ -58348,82 +67105,103 @@ class _$RunStepDetailsToolCallsCodeObjectImpl @override Map toJson() { - return _$$RunStepDetailsToolCallsCodeObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsCodeObject - extends RunStepDetailsToolCalls { - const factory RunStepDetailsToolCallsCodeObject( - {required final String id, +abstract class RunStepDeltaStepDetailsToolCallsCodeObject + extends RunStepDeltaStepDetailsToolCalls { + const factory RunStepDeltaStepDetailsToolCallsCodeObject( + {required final int index, + @JsonKey(includeIfNull: false) final String? id, required final String type, - @JsonKey(name: 'code_interpreter') - required final RunStepDetailsToolCallsCodeObjectCodeInterpreter - codeInterpreter}) = _$RunStepDetailsToolCallsCodeObjectImpl; - const RunStepDetailsToolCallsCodeObject._() : super._(); + @JsonKey(name: 'code_interpreter', includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? 
+ codeInterpreter}) = _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl; + const RunStepDeltaStepDetailsToolCallsCodeObject._() : super._(); - factory RunStepDetailsToolCallsCodeObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsCodeObject.fromJson( Map json) = - _$RunStepDetailsToolCallsCodeObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson; + /// The index of the tool call in the tool calls array. @override + int get index; /// The ID of the tool call. - String get id; @override + @JsonKey(includeIfNull: false) + String? get id; /// Always `code_interpreter`. + @override String get type; - /// The Code Interpreter tool call definition. - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter get codeInterpreter; + /// The Code Interpreter tool call definition. - outputs + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + get codeInterpreter; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith( - _$RunStepDetailsToolCallsFileSearchObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) then) = - __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) + then) = + __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< + $Res>; @override @useResult $Res call( - {String id, + {int index, + @JsonKey(includeIfNull: false) String? 
id, String type, @JsonKey(name: 'file_search') Map fileSearch}); } /// @nodoc -class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFileSearchObjectImpl> - implements _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsFileSearchObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsFileSearchObjectImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> + implements + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) + _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, + Object? index = null, + Object? id = freezed, Object? type = null, Object? fileSearch = null, }) { - return _then(_$RunStepDetailsToolCallsFileSearchObjectImpl( - id: null == id + return _then(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + id: freezed == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String, + as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable @@ -58438,23 +67216,29 @@ class __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFileSearchObjectImpl - extends RunStepDetailsToolCallsFileSearchObject { - const _$RunStepDetailsToolCallsFileSearchObjectImpl( - {required this.id, +class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl + extends RunStepDeltaStepDetailsToolCallsFileSearchObject { + const _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( + {required this.index, + @JsonKey(includeIfNull: false) this.id, required this.type, @JsonKey(name: 'file_search') required final Map fileSearch}) : _fileSearch = fileSearch, super._(); - factory _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFileSearchObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson(json); + + /// The index of the tool call in the tool calls array. + @override + final int index; /// The ID of the tool call object. @override - final String id; + @JsonKey(includeIfNull: false) + final String? id; /// The type of tool call. This is always going to be `file_search` for this type of tool call. 
@override @@ -58474,91 +67258,122 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override String toString() { - return 'RunStepDetailsToolCalls.fileSearch(id: $id, type: $type, fileSearch: $fileSearch)'; + return 'RunStepDeltaStepDetailsToolCalls.fileSearch(index: $index, id: $id, type: $type, fileSearch: $fileSearch)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFileSearchObjectImpl && + other is _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && const DeepCollectionEquality() .equals(other._fileSearch, _fileSearch)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash( - runtimeType, id, type, const DeepCollectionEquality().hash(_fileSearch)); + int get hashCode => Object.hash(runtimeType, index, id, type, + const DeepCollectionEquality().hash(_fileSearch)); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchObjectImpl> + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> get copyWith => - __$$RunStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsFileSearchObjectImpl>(this, _$identity); + __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function) function, }) { - return fileSearch(id, type, this.fileSearch); + return fileSearch(index, id, type, this.fileSearch); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult? Function(String id, String type, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? 
Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? function, }) { - return fileSearch?.call(id, type, this.fileSearch); + return fileSearch?.call(index, id, type, this.fileSearch); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? function, required TResult orElse(), }) { if (fileSearch != null) { - return fileSearch(id, type, this.fileSearch); + return fileSearch(index, id, type, this.fileSearch); } return orElse(); } @@ -58566,11 +67381,13 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFunctionObject value) function, }) { return fileSearch(this); @@ -58579,10 +67396,12 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, }) { return fileSearch?.call(this); } @@ -58590,9 +67409,12 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? 
+ function, required TResult orElse(), }) { if (fileSearch != null) { @@ -58603,97 +67425,125 @@ class _$RunStepDetailsToolCallsFileSearchObjectImpl @override Map toJson() { - return _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsFileSearchObject - extends RunStepDetailsToolCalls { - const factory RunStepDetailsToolCallsFileSearchObject( - {required final String id, +abstract class RunStepDeltaStepDetailsToolCallsFileSearchObject + extends RunStepDeltaStepDetailsToolCalls { + const factory RunStepDeltaStepDetailsToolCallsFileSearchObject( + {required final int index, + @JsonKey(includeIfNull: false) final String? id, required final String type, @JsonKey(name: 'file_search') required final Map fileSearch}) = - _$RunStepDetailsToolCallsFileSearchObjectImpl; - const RunStepDetailsToolCallsFileSearchObject._() : super._(); + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl; + const RunStepDeltaStepDetailsToolCallsFileSearchObject._() : super._(); - factory RunStepDetailsToolCallsFileSearchObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson( Map json) = - _$RunStepDetailsToolCallsFileSearchObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson; + /// The index of the tool call in the tool calls array. @override + int get index; /// The ID of the tool call object. - String get id; @override + @JsonKey(includeIfNull: false) + String? get id; /// The type of tool call. This is always going to be `file_search` for this type of tool call. + @override String get type; /// For now, this is always going to be an empty object. @JsonKey(name: 'file_search') Map get fileSearch; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDetailsToolCallsFileSearchObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith( - _$RunStepDetailsToolCallsFunctionObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) then) = - __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) + then) = + __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call({String id, String type, RunStepDetailsToolCallsFunction function}); + $Res call( + {int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function}); - $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function; + $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? 
get function; } /// @nodoc -class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFunctionObjectImpl> - implements _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsFunctionObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsFunctionObjectImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> + implements + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? id = null, + Object? index = null, + Object? id = freezed, Object? type = null, - Object? function = null, + Object? function = freezed, }) { - return _then(_$RunStepDetailsToolCallsFunctionObjectImpl( - id: null == id + return _then(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + id: freezed == id ? _value.id : id // ignore: cast_nullable_to_non_nullable - as String, + as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - function: null == function + function: freezed == function ? _value.function : function // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsFunction, + as RunStepDeltaStepDetailsToolCallsFunction?, )); } + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsFunctionCopyWith<$Res> get function { - return $RunStepDetailsToolCallsFunctionCopyWith<$Res>(_value.function, - (value) { + $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? 
get function { + if (_value.function == null) { + return null; + } + + return $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>( + _value.function!, (value) { return _then(_value.copyWith(function: value)); }); } @@ -58701,19 +67551,27 @@ class __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFunctionObjectImpl - extends RunStepDetailsToolCallsFunctionObject { - const _$RunStepDetailsToolCallsFunctionObjectImpl( - {required this.id, required this.type, required this.function}) +class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl + extends RunStepDeltaStepDetailsToolCallsFunctionObject { + const _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( + {required this.index, + @JsonKey(includeIfNull: false) this.id, + required this.type, + @JsonKey(includeIfNull: false) this.function}) : super._(); - factory _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFunctionObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson(json); + + /// The index of the tool call in the tool calls array. + @override + final int index; /// The ID of the tool call object. @override - final String id; + @JsonKey(includeIfNull: false) + final String? id; /// Always `function`. @override @@ -58721,93 +67579,126 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl /// The definition of the function that was called. @override - final RunStepDetailsToolCallsFunction function; + @JsonKey(includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsFunction? function; @override String toString() { - return 'RunStepDetailsToolCalls.function(id: $id, type: $type, function: $function)'; + return 'RunStepDeltaStepDetailsToolCalls.function(index: $index, id: $id, type: $type, function: $function)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFunctionObjectImpl && + other is _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl && + (identical(other.index, index) || other.index == index) && (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && (identical(other.function, function) || other.function == function)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, id, type, function); + int get hashCode => Object.hash(runtimeType, index, id, type, function); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDetailsToolCallsFunctionObjectImpl> - get copyWith => __$$RunStepDetailsToolCallsFunctionObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsFunctionObjectImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl>( + this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter) + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter) codeInterpreter, - required TResult Function(String id, String type, + required TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch) fileSearch, required TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function) + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function) function, }) { - return function(id, type, this.function); + return function(index, id, type, this.function); } @override @optionalTypeArgs TResult? whenOrNull({ TResult? Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? + codeInterpreter, + TResult? Function( + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? - codeInterpreter, - TResult? Function(String id, String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult? Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? function, }) { - return function?.call(id, type, this.function); + return function?.call(index, id, type, this.function); } @override @optionalTypeArgs TResult maybeWhen({ TResult Function( - String id, + int index, + @JsonKey(includeIfNull: false) String? id, String type, - @JsonKey(name: 'code_interpreter') - RunStepDetailsToolCallsCodeObjectCodeInterpreter codeInterpreter)? + @JsonKey(name: 'code_interpreter', includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? + codeInterpreter)? codeInterpreter, - TResult Function(String id, String type, + TResult Function( + int index, + @JsonKey(includeIfNull: false) String? id, + String type, @JsonKey(name: 'file_search') Map fileSearch)? fileSearch, TResult Function( - String id, String type, RunStepDetailsToolCallsFunction function)? + int index, + @JsonKey(includeIfNull: false) String? id, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? function)? 
function, required TResult orElse(), }) { if (function != null) { - return function(id, type, this.function); + return function(index, id, type, this.function); } return orElse(); } @@ -58815,11 +67706,13 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeObject value) + required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) codeInterpreter, - required TResult Function(RunStepDetailsToolCallsFileSearchObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFileSearchObject value) fileSearch, - required TResult Function(RunStepDetailsToolCallsFunctionObject value) + required TResult Function( + RunStepDeltaStepDetailsToolCallsFunctionObject value) function, }) { return function(this); @@ -58828,10 +67721,12 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult? Function(RunStepDetailsToolCallsFileSearchObject value)? + TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult? Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, }) { return function?.call(this); } @@ -58839,9 +67734,12 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeObject value)? codeInterpreter, - TResult Function(RunStepDetailsToolCallsFileSearchObject value)? fileSearch, - TResult Function(RunStepDetailsToolCallsFunctionObject value)? function, + TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? + codeInterpreter, + TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? + fileSearch, + TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? + function, required TResult orElse(), }) { if (function != null) { @@ -58852,103 +67750,128 @@ class _$RunStepDetailsToolCallsFunctionObjectImpl @override Map toJson() { - return _$$RunStepDetailsToolCallsFunctionObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsFunctionObject - extends RunStepDetailsToolCalls { - const factory RunStepDetailsToolCallsFunctionObject( - {required final String id, +abstract class RunStepDeltaStepDetailsToolCallsFunctionObject + extends RunStepDeltaStepDetailsToolCalls { + const factory RunStepDeltaStepDetailsToolCallsFunctionObject( + {required final int index, + @JsonKey(includeIfNull: false) final String? id, required final String type, - required final RunStepDetailsToolCallsFunction function}) = - _$RunStepDetailsToolCallsFunctionObjectImpl; - const RunStepDetailsToolCallsFunctionObject._() : super._(); + @JsonKey(includeIfNull: false) + final RunStepDeltaStepDetailsToolCallsFunction? 
function}) = + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl; + const RunStepDeltaStepDetailsToolCallsFunctionObject._() : super._(); - factory RunStepDetailsToolCallsFunctionObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson( Map json) = - _$RunStepDetailsToolCallsFunctionObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson; + /// The index of the tool call in the tool calls array. @override + int get index; /// The ID of the tool call object. - String get id; @override + @JsonKey(includeIfNull: false) + String? get id; /// Always `function`. + @override String get type; /// The definition of the function that was called. - RunStepDetailsToolCallsFunction get function; + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsFunction? get function; + + /// Create a copy of RunStepDeltaStepDetailsToolCalls + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDetailsToolCallsFunctionObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCallsFunction _$RunStepDetailsToolCallsFunctionFromJson( - Map json) { - return _RunStepDetailsToolCallsFunction.fromJson(json); +RunStepDeltaStepDetailsToolCallsFunction + _$RunStepDeltaStepDetailsToolCallsFunctionFromJson( + Map json) { + return _RunStepDeltaStepDetailsToolCallsFunction.fromJson(json); } /// @nodoc -mixin _$RunStepDetailsToolCallsFunction { +mixin _$RunStepDeltaStepDetailsToolCallsFunction { /// The name of the function. - String get name => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; /// The arguments passed to the function. - String get arguments => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get arguments => throw _privateConstructorUsedError; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @JsonKey(includeIfNull: false) String? get output => throw _privateConstructorUsedError; + /// Serializes this RunStepDeltaStepDetailsToolCallsFunction to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDetailsToolCallsFunctionCopyWith + + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDeltaStepDetailsToolCallsFunctionCopyWith< + RunStepDeltaStepDetailsToolCallsFunction> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsFunctionCopyWith<$Res> { - factory $RunStepDetailsToolCallsFunctionCopyWith( - RunStepDetailsToolCallsFunction value, - $Res Function(RunStepDetailsToolCallsFunction) then) = - _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, - RunStepDetailsToolCallsFunction>; +abstract class $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { + factory $RunStepDeltaStepDetailsToolCallsFunctionCopyWith( + RunStepDeltaStepDetailsToolCallsFunction value, + $Res Function(RunStepDeltaStepDetailsToolCallsFunction) then) = + _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, + RunStepDeltaStepDetailsToolCallsFunction>; @useResult - $Res call({String name, String arguments, String? output}); + $Res call( + {@JsonKey(includeIfNull: false) String? name, + @JsonKey(includeIfNull: false) String? arguments, + @JsonKey(includeIfNull: false) String? output}); } /// @nodoc -class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsFunction> - implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { - _$RunStepDetailsToolCallsFunctionCopyWithImpl(this._value, this._then); +class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, + $Val extends RunStepDeltaStepDetailsToolCallsFunction> + implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { + _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl( + this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? arguments = null, + Object? name = freezed, + Object? arguments = freezed, Object? output = freezed, }) { return _then(_value.copyWith( - name: null == name + name: freezed == name ? _value.name : name // ignore: cast_nullable_to_non_nullable - as String, - arguments: null == arguments + as String?, + arguments: freezed == arguments ? _value.arguments : arguments // ignore: cast_nullable_to_non_nullable - as String, + as String?, output: freezed == output ? _value.output : output // ignore: cast_nullable_to_non_nullable @@ -58958,43 +67881,48 @@ class _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> - implements $RunStepDetailsToolCallsFunctionCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsFunctionImplCopyWith( - _$RunStepDetailsToolCallsFunctionImpl value, - $Res Function(_$RunStepDetailsToolCallsFunctionImpl) then) = - __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; +abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> + implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsFunctionImpl value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) then) = + __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; @override @useResult - $Res call({String name, String arguments, String? output}); + $Res call( + {@JsonKey(includeIfNull: false) String? 
name, + @JsonKey(includeIfNull: false) String? arguments, + @JsonKey(includeIfNull: false) String? output}); } /// @nodoc -class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsFunctionCopyWithImpl<$Res, - _$RunStepDetailsToolCallsFunctionImpl> - implements _$$RunStepDetailsToolCallsFunctionImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl( - _$RunStepDetailsToolCallsFunctionImpl _value, - $Res Function(_$RunStepDetailsToolCallsFunctionImpl) _then) +class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> + extends _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsFunctionImpl> + implements _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> { + __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsFunctionImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? name = null, - Object? arguments = null, + Object? name = freezed, + Object? arguments = freezed, Object? output = freezed, }) { - return _then(_$RunStepDetailsToolCallsFunctionImpl( - name: null == name + return _then(_$RunStepDeltaStepDetailsToolCallsFunctionImpl( + name: freezed == name ? _value.name : name // ignore: cast_nullable_to_non_nullable - as String, - arguments: null == arguments + as String?, + arguments: freezed == arguments ? _value.arguments : arguments // ignore: cast_nullable_to_non_nullable - as String, + as String?, output: freezed == output ? _value.output : output // ignore: cast_nullable_to_non_nullable @@ -59005,512 +67933,567 @@ class __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsFunctionImpl - extends _RunStepDetailsToolCallsFunction { - const _$RunStepDetailsToolCallsFunctionImpl( - {required this.name, required this.arguments, required this.output}) +class _$RunStepDeltaStepDetailsToolCallsFunctionImpl + extends _RunStepDeltaStepDetailsToolCallsFunction { + const _$RunStepDeltaStepDetailsToolCallsFunctionImpl( + {@JsonKey(includeIfNull: false) this.name, + @JsonKey(includeIfNull: false) this.arguments, + @JsonKey(includeIfNull: false) this.output}) : super._(); - factory _$RunStepDetailsToolCallsFunctionImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsFunctionImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsFunctionImplFromJson(json); /// The name of the function. @override - final String name; + @JsonKey(includeIfNull: false) + final String? name; /// The arguments passed to the function. @override - final String arguments; + @JsonKey(includeIfNull: false) + final String? arguments; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. @override + @JsonKey(includeIfNull: false) final String? 
output; @override String toString() { - return 'RunStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; + return 'RunStepDeltaStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsFunctionImpl && + other is _$RunStepDeltaStepDetailsToolCallsFunctionImpl && (identical(other.name, name) || other.name == name) && (identical(other.arguments, arguments) || other.arguments == arguments) && (identical(other.output, output) || other.output == output)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, name, arguments, output); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDetailsToolCallsFunctionImpl> - get copyWith => __$$RunStepDetailsToolCallsFunctionImplCopyWithImpl< - _$RunStepDetailsToolCallsFunctionImpl>(this, _$identity); + _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionImpl> + get copyWith => + __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsFunctionImpl>(this, _$identity); @override Map toJson() { - return _$$RunStepDetailsToolCallsFunctionImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsFunctionImplToJson( this, ); } } -abstract class _RunStepDetailsToolCallsFunction - extends RunStepDetailsToolCallsFunction { - const factory _RunStepDetailsToolCallsFunction( - {required final String name, - required final String arguments, - required final String? output}) = _$RunStepDetailsToolCallsFunctionImpl; - const _RunStepDetailsToolCallsFunction._() : super._(); - - factory _RunStepDetailsToolCallsFunction.fromJson(Map json) = - _$RunStepDetailsToolCallsFunctionImpl.fromJson; +abstract class _RunStepDeltaStepDetailsToolCallsFunction + extends RunStepDeltaStepDetailsToolCallsFunction { + const factory _RunStepDeltaStepDetailsToolCallsFunction( + {@JsonKey(includeIfNull: false) final String? name, + @JsonKey(includeIfNull: false) final String? arguments, + @JsonKey(includeIfNull: false) final String? output}) = + _$RunStepDeltaStepDetailsToolCallsFunctionImpl; + const _RunStepDeltaStepDetailsToolCallsFunction._() : super._(); - @override + factory _RunStepDeltaStepDetailsToolCallsFunction.fromJson( + Map json) = + _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson; /// The name of the function. - String get name; @override + @JsonKey(includeIfNull: false) + String? get name; /// The arguments passed to the function. - String get arguments; @override + @JsonKey(includeIfNull: false) + String? get arguments; /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. + @override + @JsonKey(includeIfNull: false) String? get output; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsFunction + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDetailsToolCallsFunctionImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsFunctionImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDeltaStepDetailsToolCalls _$RunStepDeltaStepDetailsToolCallsFromJson( +RunStepDetailsToolCallsCodeOutput _$RunStepDetailsToolCallsCodeOutputFromJson( Map json) { switch (json['type']) { - case 'code_interpreter': - return RunStepDeltaStepDetailsToolCallsCodeObject.fromJson(json); - case 'file_search': - return RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson(json); - case 'function': - return RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson(json); + case 'logs': + return RunStepDetailsToolCallsCodeOutputLogsObject.fromJson(json); + case 'image': + return RunStepDetailsToolCallsCodeOutputImageObject.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'RunStepDeltaStepDetailsToolCalls', + 'RunStepDetailsToolCallsCodeOutput', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDeltaStepDetailsToolCalls { - /// The index of the tool call in the tool calls array. - int get index => throw _privateConstructorUsedError; - - /// The ID of the tool call. - @JsonKey(includeIfNull: false) - String? get id => throw _privateConstructorUsedError; - - /// Always `code_interpreter`. +mixin _$RunStepDetailsToolCallsCodeOutput { + /// Always `logs`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ + required TResult Function(String type, String logs) logs, required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) - codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch) - fileSearch, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) - function, + String type, RunStepDetailsToolCallsCodeOutputImage image) + image, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ + TResult? Function(String type, String logs)? logs, TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, + String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String type, String logs)? logs, + TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? 
+ image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDetailsToolCallsCodeOutputImageObject value) + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + + /// Serializes this RunStepDetailsToolCallsCodeOutput to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDetailsToolCallsCodeOutputCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory $RunStepDetailsToolCallsCodeOutputCopyWith( + RunStepDetailsToolCallsCodeOutput value, + $Res Function(RunStepDetailsToolCallsCodeOutput) then) = + _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + RunStepDetailsToolCallsCodeOutput>; + @useResult + $Res call({String type}); +} + +/// @nodoc +class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + $Val extends RunStepDetailsToolCallsCodeOutput> + implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + _$RunStepDetailsToolCallsCodeOutputCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + }) { + return _then(_value.copyWith( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) + then) = + __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String type, String logs}); +} + +/// @nodoc +class __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> + implements + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) + : super(_value, _then); + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
+ @pragma('vm:prefer-inline') + @override + $Res call({ + Object? type = null, + Object? logs = null, + }) { + return _then(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( + type: null == type + ? _value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + logs: null == logs + ? _value.logs + : logs // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl + extends RunStepDetailsToolCallsCodeOutputLogsObject { + const _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( + {required this.type, required this.logs}) + : super._(); + + factory _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( + Map json) => + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); + + /// Always `logs`. + @override + final String type; + + /// The text output from the Code Interpreter tool call. + @override + final String logs; + + @override + String toString() { + return 'RunStepDetailsToolCallsCodeOutput.logs(type: $type, logs: $logs)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl && + (identical(other.type, type) || other.type == type) && + (identical(other.logs, logs) || other.logs == logs)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash(runtimeType, type, logs); + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> + get copyWith => + __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String type, String logs) logs, + required TResult Function( + String type, RunStepDetailsToolCallsCodeOutputImage image) + image, + }) { + return logs(type, this.logs); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String type, String logs)? logs, TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, - }) => - throw _privateConstructorUsedError; + String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, + }) { + return logs?.call(type, this.logs); + } + + @override @optionalTypeArgs TResult maybeWhen({ - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + TResult Function(String type, String logs)? logs, + TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? 
+ image, required TResult orElse(), - }) => - throw _privateConstructorUsedError; + }) { + if (logs != null) { + return logs(type, this.logs); + } + return orElse(); + } + + @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) - codeInterpreter, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) - fileSearch, + required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) + logs, required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) - function, - }) => - throw _privateConstructorUsedError; + RunStepDetailsToolCallsCodeOutputImageObject value) + image, + }) { + return logs(this); + } + + @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, - }) => - throw _privateConstructorUsedError; + TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? + image, + }) { + return logs?.call(this); + } + + @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, required TResult orElse(), - }) => - throw _privateConstructorUsedError; - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDeltaStepDetailsToolCallsCopyWith - get copyWith => throw _privateConstructorUsedError; -} + }) { + if (logs != null) { + return logs(this); + } + return orElse(); + } -/// @nodoc -abstract class $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory $RunStepDeltaStepDetailsToolCallsCopyWith( - RunStepDeltaStepDetailsToolCalls value, - $Res Function(RunStepDeltaStepDetailsToolCalls) then) = - _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - RunStepDeltaStepDetailsToolCalls>; - @useResult - $Res call( - {int index, @JsonKey(includeIfNull: false) String? id, String type}); + @override + Map toJson() { + return _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplToJson( + this, + ); + } } -/// @nodoc -class _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - $Val extends RunStepDeltaStepDetailsToolCalls> - implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - _$RunStepDeltaStepDetailsToolCallsCopyWithImpl(this._value, this._then); +abstract class RunStepDetailsToolCallsCodeOutputLogsObject + extends RunStepDetailsToolCallsCodeOutput { + const factory RunStepDetailsToolCallsCodeOutputLogsObject( + {required final String type, required final String logs}) = + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl; + const RunStepDetailsToolCallsCodeOutputLogsObject._() : super._(); - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; + factory RunStepDetailsToolCallsCodeOutputLogsObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; - @pragma('vm:prefer-inline') + /// Always `logs`. 
@override - $Res call({ - Object? index = null, - Object? id = freezed, - Object? type = null, - }) { - return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - id: freezed == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String?, - type: null == type - ? _value.type - : type // ignore: cast_nullable_to_non_nullable - as String, - ) as $Val); - } + String get type; + + /// The text output from the Code Interpreter tool call. + String get logs; + + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> + get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> - implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) +abstract class _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> + implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res>; + __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter}); + $Res call({String type, RunStepDetailsToolCallsCodeOutputImage image}); - $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? - get codeInterpreter; + $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image; } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> - implements _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl) _then) +class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> + extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> + implements + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> { + __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl _value, + $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, - Object? id = freezed, Object? type = null, - Object? codeInterpreter = freezed, + Object? 
image = null, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, - id: freezed == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String?, + return _then(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - codeInterpreter: freezed == codeInterpreter - ? _value.codeInterpreter - : codeInterpreter // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter?, + image: null == image + ? _value.image + : image // ignore: cast_nullable_to_non_nullable + as RunStepDetailsToolCallsCodeOutputImage, )); } + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith<$Res>? - get codeInterpreter { - if (_value.codeInterpreter == null) { - return null; - } - - return $RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterCopyWith< - $Res>(_value.codeInterpreter!, (value) { - return _then(_value.copyWith(codeInterpreter: value)); + $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image { + return $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res>(_value.image, + (value) { + return _then(_value.copyWith(image: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl - extends RunStepDeltaStepDetailsToolCallsCodeObject { - const _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - {required this.index, - @JsonKey(includeIfNull: false) this.id, - required this.type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - this.codeInterpreter}) +class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl + extends RunStepDetailsToolCallsCodeOutputImageObject { + const _$RunStepDetailsToolCallsCodeOutputImageObjectImpl( + {required this.type, required this.image}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson( + factory _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson(json); - - /// The index of the tool call in the tool calls array. - @override - final int index; - - /// The ID of the tool call. - @override - @JsonKey(includeIfNull: false) - final String? id; + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplFromJson(json); - /// Always `code_interpreter`. + /// Always `image`. @override final String type; - /// The Code Interpreter tool call definition. - outputs + /// Code interpreter image output. @override - @JsonKey(name: 'code_interpreter', includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? 
- codeInterpreter; + final RunStepDetailsToolCallsCodeOutputImage image; @override String toString() { - return 'RunStepDeltaStepDetailsToolCalls.codeInterpreter(index: $index, id: $id, type: $type, codeInterpreter: $codeInterpreter)'; + return 'RunStepDetailsToolCallsCodeOutput.image(type: $type, image: $image)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.id, id) || other.id == id) && + other is _$RunStepDetailsToolCallsCodeOutputImageObjectImpl && (identical(other.type, type) || other.type == type) && - (identical(other.codeInterpreter, codeInterpreter) || - other.codeInterpreter == codeInterpreter)); + (identical(other.image, image) || other.image == image)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => - Object.hash(runtimeType, index, id, type, codeInterpreter); + int get hashCode => Object.hash(runtimeType, type, image); - @JsonKey(ignore: true) + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl>( + __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ + required TResult Function(String type, String logs) logs, required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) - codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch) - fileSearch, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) - function, + String type, RunStepDetailsToolCallsCodeOutputImage image) + image, }) { - return codeInterpreter(index, id, type, this.codeInterpreter); + return image(type, this.image); } @override @optionalTypeArgs TResult? whenOrNull({ + TResult? Function(String type, String logs)? logs, TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + String type, RunStepDetailsToolCallsCodeOutputImage image)? 
+ image, }) { - return codeInterpreter?.call(index, id, type, this.codeInterpreter); + return image?.call(type, this.image); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + TResult Function(String type, String logs)? logs, + TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? + image, required TResult orElse(), }) { - if (codeInterpreter != null) { - return codeInterpreter(index, id, type, this.codeInterpreter); + if (image != null) { + return image(type, this.image); } return orElse(); } @@ -59518,313 +68501,391 @@ class _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) - codeInterpreter, - required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) - fileSearch, + required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) + logs, required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) - function, + RunStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return codeInterpreter(this); + return image(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? + image, + }) { + return image?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, + TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, + required TResult orElse(), + }) { + if (image != null) { + return image(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$RunStepDetailsToolCallsCodeOutputImageObjectImplToJson( + this, + ); } +} + +abstract class RunStepDetailsToolCallsCodeOutputImageObject + extends RunStepDetailsToolCallsCodeOutput { + const factory RunStepDetailsToolCallsCodeOutputImageObject( + {required final String type, + required final RunStepDetailsToolCallsCodeOutputImage image}) = + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl; + const RunStepDetailsToolCallsCodeOutputImageObject._() : super._(); + + factory RunStepDetailsToolCallsCodeOutputImageObject.fromJson( + Map json) = + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; + + /// Always `image`. + @override + String get type; + + /// Code interpreter image output. + RunStepDetailsToolCallsCodeOutputImage get image; + /// Create a copy of RunStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
@override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> + get copyWith => throw _privateConstructorUsedError; +} + +RunStepDeltaStepDetailsToolCallsCodeOutput + _$RunStepDeltaStepDetailsToolCallsCodeOutputFromJson( + Map json) { + switch (json['type']) { + case 'logs': + return RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( + json); + case 'image': + return RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( + json); + + default: + throw CheckedFromJsonException( + json, + 'type', + 'RunStepDeltaStepDetailsToolCallsCodeOutput', + 'Invalid union type "${json['type']}"!'); + } +} + +/// @nodoc +mixin _$RunStepDeltaStepDetailsToolCallsCodeOutput { + /// The index of the output in the outputs array. + int get index => throw _privateConstructorUsedError; + + /// Always `logs`. + String get type => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function( + int index, String type, @JsonKey(includeIfNull: false) String? logs) + logs, + required TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult? Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, + TResult Function( + int index, + String type, + @JsonKey(includeIfNull: false) + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) + logs, + required TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) + image, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, - }) { - return codeInterpreter?.call(this); - } - - @override + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, + }) => + throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? 
+ image, required TResult orElse(), - }) { - if (codeInterpreter != null) { - return codeInterpreter(this); - } - return orElse(); - } - - @override - Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplToJson( - this, - ); - } -} - -abstract class RunStepDeltaStepDetailsToolCallsCodeObject - extends RunStepDeltaStepDetailsToolCalls { - const factory RunStepDeltaStepDetailsToolCallsCodeObject( - {required final int index, - @JsonKey(includeIfNull: false) final String? id, - required final String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter}) = _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl; - const RunStepDeltaStepDetailsToolCallsCodeObject._() : super._(); + }) => + throw _privateConstructorUsedError; - factory RunStepDeltaStepDetailsToolCallsCodeObject.fromJson( - Map json) = - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl.fromJson; + /// Serializes this RunStepDeltaStepDetailsToolCallsCodeOutput to a JSON map. + Map toJson() => throw _privateConstructorUsedError; - @override + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith< + RunStepDeltaStepDetailsToolCallsCodeOutput> + get copyWith => throw _privateConstructorUsedError; +} - /// The index of the tool call in the tool calls array. - int get index; - @override +/// @nodoc +abstract class $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith( + RunStepDeltaStepDetailsToolCallsCodeOutput value, + $Res Function(RunStepDeltaStepDetailsToolCallsCodeOutput) then) = + _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + RunStepDeltaStepDetailsToolCallsCodeOutput>; + @useResult + $Res call({int index, String type}); +} - /// The ID of the tool call. - @JsonKey(includeIfNull: false) - String? get id; - @override +/// @nodoc +class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + $Val extends RunStepDeltaStepDetailsToolCallsCodeOutput> + implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl( + this._value, this._then); - /// Always `code_interpreter`. - String get type; + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; - /// The Code Interpreter tool call definition. - outputs - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - get codeInterpreter; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl> - get copyWith => throw _privateConstructorUsedError; + $Res call({ + Object? index = null, + Object? type = null, + }) { + return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + type: null == type + ? 
_value.type + : type // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) +abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl value, + $Res Function( + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< + __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< $Res>; @override @useResult $Res call( - {int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch}); + {int index, String type, @JsonKey(includeIfNull: false) String? logs}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> +class __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + $Res> + extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> implements - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl) + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + $Res> { + __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? index = null, - Object? id = freezed, Object? type = null, - Object? fileSearch = null, + Object? logs = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( index: null == index ? _value.index : index // ignore: cast_nullable_to_non_nullable as int, - id: freezed == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - fileSearch: null == fileSearch - ? _value._fileSearch - : fileSearch // ignore: cast_nullable_to_non_nullable - as Map, + logs: freezed == logs + ? 
_value.logs + : logs // ignore: cast_nullable_to_non_nullable + as String?, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl - extends RunStepDeltaStepDetailsToolCallsFileSearchObject { - const _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( +class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl + extends RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject { + const _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( {required this.index, - @JsonKey(includeIfNull: false) this.id, required this.type, - @JsonKey(name: 'file_search') - required final Map fileSearch}) - : _fileSearch = fileSearch, - super._(); + @JsonKey(includeIfNull: false) this.logs}) + : super._(); - factory _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); - /// The index of the tool call in the tool calls array. + /// The index of the output in the outputs array. @override final int index; - /// The ID of the tool call object. - @override - @JsonKey(includeIfNull: false) - final String? id; - - /// The type of tool call. This is always going to be `file_search` for this type of tool call. + /// Always `logs`. @override final String type; - /// For now, this is always going to be an empty object. - final Map _fileSearch; - - /// For now, this is always going to be an empty object. + /// The text output from the Code Interpreter tool call. @override - @JsonKey(name: 'file_search') - Map get fileSearch { - if (_fileSearch is EqualUnmodifiableMapView) return _fileSearch; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(_fileSearch); - } + @JsonKey(includeIfNull: false) + final String? logs; @override String toString() { - return 'RunStepDeltaStepDetailsToolCalls.fileSearch(index: $index, id: $id, type: $type, fileSearch: $fileSearch)'; + return 'RunStepDeltaStepDetailsToolCallsCodeOutput.logs(index: $index, type: $type, logs: $logs)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl && + other + is _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl && (identical(other.index, index) || other.index == index) && - (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - const DeepCollectionEquality() - .equals(other._fileSearch, _fileSearch)); + (identical(other.logs, logs) || other.logs == logs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, index, id, type, - const DeepCollectionEquality().hash(_fileSearch)); + int get hashCode => Object.hash(runtimeType, index, type, logs); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl>( + __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) - codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch) - fileSearch, + int index, String type, @JsonKey(includeIfNull: false) String? logs) + logs, required TResult Function( int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) + image, }) { - return fileSearch(index, id, type, this.fileSearch); + return logs(index, type, this.logs); } @override @optionalTypeArgs TResult? whenOrNull({ + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, TResult? Function( int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, }) { - return fileSearch?.call(index, id, type, this.fileSearch); + return logs?.call(index, type, this.logs); } @override @optionalTypeArgs TResult maybeWhen({ + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, TResult Function( int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? 
+ image, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(index, id, type, this.fileSearch); + if (logs != null) { + return logs(index, type, this.logs); } return orElse(); } @@ -59832,316 +68893,272 @@ class _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) - codeInterpreter, required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) - fileSearch, + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) + logs, required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return fileSearch(this); + return logs(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, }) { - return fileSearch?.call(this); + return logs?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, required TResult orElse(), }) { - if (fileSearch != null) { - return fileSearch(this); + if (logs != null) { + return logs(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsFileSearchObject - extends RunStepDeltaStepDetailsToolCalls { - const factory RunStepDeltaStepDetailsToolCallsFileSearchObject( +abstract class RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject + extends RunStepDeltaStepDetailsToolCallsCodeOutput { + const factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject( {required final int index, - @JsonKey(includeIfNull: false) final String? id, required final String type, - @JsonKey(name: 'file_search') - required final Map fileSearch}) = - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl; - const RunStepDeltaStepDetailsToolCallsFileSearchObject._() : super._(); + @JsonKey(includeIfNull: false) final String? logs}) = + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl; + const RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsFileSearchObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; + /// The index of the output in the outputs array. @override - - /// The index of the tool call in the tool calls array. 
int get index; - @override - /// The ID of the tool call object. - @JsonKey(includeIfNull: false) - String? get id; + /// Always `logs`. @override - - /// The type of tool call. This is always going to be `file_search` for this type of tool call. String get type; - /// For now, this is always going to be an empty object. - @JsonKey(name: 'file_search') - Map get fileSearch; + /// The text output from the Code Interpreter tool call. + @JsonKey(includeIfNull: false) + String? get logs; + + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) +abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { + factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl value, + $Res Function( + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res>; + __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + $Res>; @override @useResult $Res call( {int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function}); + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}); - $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? get function; + $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? 
get image; } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> +class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + $Res> + extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> implements - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl) _then) + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + $Res> { + __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _value, + $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) + _then) : super(_value, _then); + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? index = null, - Object? id = freezed, Object? type = null, - Object? function = freezed, + Object? image = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( + return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( index: null == index ? _value.index : index // ignore: cast_nullable_to_non_nullable as int, - id: freezed == id - ? _value.id - : id // ignore: cast_nullable_to_non_nullable - as String?, type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - function: freezed == function - ? _value.function - : function // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsToolCallsFunction?, + image: freezed == image + ? _value.image + : image // ignore: cast_nullable_to_non_nullable + as RunStepDeltaStepDetailsToolCallsCodeOutputImage?, )); } + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>? get function { - if (_value.function == null) { + $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? 
get image { + if (_value.image == null) { return null; } - return $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res>( - _value.function!, (value) { - return _then(_value.copyWith(function: value)); + return $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>( + _value.image!, (value) { + return _then(_value.copyWith(image: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl - extends RunStepDeltaStepDetailsToolCallsFunctionObject { - const _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( +class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl + extends RunStepDeltaStepDetailsToolCallsCodeOutputImageObject { + const _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( {required this.index, - @JsonKey(includeIfNull: false) this.id, required this.type, - @JsonKey(includeIfNull: false) this.function}) + @JsonKey(includeIfNull: false) this.image}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson( + factory _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson(json); + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( + json); - /// The index of the tool call in the tool calls array. + /// The index of the output in the outputs array. @override final int index; - /// The ID of the tool call object. - @override - @JsonKey(includeIfNull: false) - final String? id; - - /// Always `function`. + /// Always `image`. @override final String type; - /// The definition of the function that was called. + /// Code interpreter image output. @override @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsFunction? function; + final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image; @override String toString() { - return 'RunStepDeltaStepDetailsToolCalls.function(index: $index, id: $id, type: $type, function: $function)'; + return 'RunStepDeltaStepDetailsToolCallsCodeOutput.image(index: $index, type: $type, image: $image)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl && + other + is _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl && (identical(other.index, index) || other.index == index) && - (identical(other.id, id) || other.id == id) && (identical(other.type, type) || other.type == type) && - (identical(other.function, function) || - other.function == function)); + (identical(other.image, image) || other.image == image)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, index, id, type, function); + int get hashCode => Object.hash(runtimeType, index, type, image); - @JsonKey(ignore: true) + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => - __$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl>( + __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl>( this, _$identity); @override @optionalTypeArgs TResult when({ required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter) - codeInterpreter, - required TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch) - fileSearch, + int index, String type, @JsonKey(includeIfNull: false) String? logs) + logs, required TResult Function( int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function) - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) + image, }) { - return function(index, id, type, this.function); + return image(index, type, this.image); } @override @optionalTypeArgs TResult? whenOrNull({ + TResult? Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, TResult? Function( int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult? Function( - int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? + image, }) { - return function?.call(index, id, type, this.function); + return image?.call(index, type, this.image); } @override @optionalTypeArgs TResult maybeWhen({ + TResult Function(int index, String type, + @JsonKey(includeIfNull: false) String? logs)? + logs, TResult Function( int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'code_interpreter', includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? - codeInterpreter)? - codeInterpreter, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, - String type, - @JsonKey(name: 'file_search') Map fileSearch)? - fileSearch, - TResult Function( - int index, - @JsonKey(includeIfNull: false) String? id, String type, @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? function)? - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? 
+ image, required TResult orElse(), }) { - if (function != null) { - return function(index, id, type, this.function); + if (image != null) { + return image(index, type, this.image); } return orElse(); } @@ -60149,418 +69166,184 @@ class _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value) - codeInterpreter, required TResult Function( - RunStepDeltaStepDetailsToolCallsFileSearchObject value) - fileSearch, + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) + logs, required TResult Function( - RunStepDeltaStepDetailsToolCallsFunctionObject value) - function, + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) + image, }) { - return function(this); + return image(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult? Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult? Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult? Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, }) { - return function?.call(this); + return image?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDeltaStepDetailsToolCallsCodeObject value)? - codeInterpreter, - TResult Function(RunStepDeltaStepDetailsToolCallsFileSearchObject value)? - fileSearch, - TResult Function(RunStepDeltaStepDetailsToolCallsFunctionObject value)? - function, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? + logs, + TResult Function( + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? + image, required TResult orElse(), }) { - if (function != null) { - return function(this); + if (image != null) { + return image(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplToJson( + return _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsFunctionObject - extends RunStepDeltaStepDetailsToolCalls { - const factory RunStepDeltaStepDetailsToolCallsFunctionObject( +abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject + extends RunStepDeltaStepDetailsToolCallsCodeOutput { + const factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject( {required final int index, - @JsonKey(includeIfNull: false) final String? id, required final String type, @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsFunction? function}) = - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl; - const RunStepDeltaStepDetailsToolCallsFunctionObject._() : super._(); + final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}) = + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl; + const RunStepDeltaStepDetailsToolCallsCodeOutputImageObject._() : super._(); - factory RunStepDeltaStepDetailsToolCallsFunctionObject.fromJson( + factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl.fromJson; + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; + /// The index of the output in the outputs array. @override - - /// The index of the tool call in the tool calls array. 
int get index; - @override - /// The ID of the tool call object. - @JsonKey(includeIfNull: false) - String? get id; + /// Always `image`. @override - - /// Always `function`. String get type; - /// The definition of the function that was called. - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsFunction? get function; - @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl> - get copyWith => throw _privateConstructorUsedError; -} - -RunStepDeltaStepDetailsToolCallsFunction - _$RunStepDeltaStepDetailsToolCallsFunctionFromJson( - Map json) { - return _RunStepDeltaStepDetailsToolCallsFunction.fromJson(json); -} - -/// @nodoc -mixin _$RunStepDeltaStepDetailsToolCallsFunction { - /// The name of the function. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; - - /// The arguments passed to the function. - @JsonKey(includeIfNull: false) - String? get arguments => throw _privateConstructorUsedError; - - /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - @JsonKey(includeIfNull: false) - String? get output => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDeltaStepDetailsToolCallsFunctionCopyWith< - RunStepDeltaStepDetailsToolCallsFunction> - get copyWith => throw _privateConstructorUsedError; -} - -/// @nodoc -abstract class $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { - factory $RunStepDeltaStepDetailsToolCallsFunctionCopyWith( - RunStepDeltaStepDetailsToolCallsFunction value, - $Res Function(RunStepDeltaStepDetailsToolCallsFunction) then) = - _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, - RunStepDeltaStepDetailsToolCallsFunction>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(includeIfNull: false) String? arguments, - @JsonKey(includeIfNull: false) String? output}); -} - -/// @nodoc -class _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, - $Val extends RunStepDeltaStepDetailsToolCallsFunction> - implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { - _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl( - this._value, this._then); - - // ignore: unused_field - final $Val _value; - // ignore: unused_field - final $Res Function($Val) _then; - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? name = freezed, - Object? arguments = freezed, - Object? output = freezed, - }) { - return _then(_value.copyWith( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - arguments: freezed == arguments - ? _value.arguments - : arguments // ignore: cast_nullable_to_non_nullable - as String?, - output: freezed == output - ? 
_value.output - : output // ignore: cast_nullable_to_non_nullable - as String?, - ) as $Val); - } -} - -/// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> - implements $RunStepDeltaStepDetailsToolCallsFunctionCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsFunctionImpl value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) then) = - __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res>; - @override - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? name, - @JsonKey(includeIfNull: false) String? arguments, - @JsonKey(includeIfNull: false) String? output}); -} - -/// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl<$Res> - extends _$RunStepDeltaStepDetailsToolCallsFunctionCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsFunctionImpl> - implements _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith<$Res> { - __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsFunctionImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsFunctionImpl) _then) - : super(_value, _then); - - @pragma('vm:prefer-inline') - @override - $Res call({ - Object? name = freezed, - Object? arguments = freezed, - Object? output = freezed, - }) { - return _then(_$RunStepDeltaStepDetailsToolCallsFunctionImpl( - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - arguments: freezed == arguments - ? _value.arguments - : arguments // ignore: cast_nullable_to_non_nullable - as String?, - output: freezed == output - ? _value.output - : output // ignore: cast_nullable_to_non_nullable - as String?, - )); - } -} - -/// @nodoc -@JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsFunctionImpl - extends _RunStepDeltaStepDetailsToolCallsFunction { - const _$RunStepDeltaStepDetailsToolCallsFunctionImpl( - {@JsonKey(includeIfNull: false) this.name, - @JsonKey(includeIfNull: false) this.arguments, - @JsonKey(includeIfNull: false) this.output}) - : super._(); - - factory _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson( - Map json) => - _$$RunStepDeltaStepDetailsToolCallsFunctionImplFromJson(json); - - /// The name of the function. - @override - @JsonKey(includeIfNull: false) - final String? name; - - /// The arguments passed to the function. - @override - @JsonKey(includeIfNull: false) - final String? arguments; - - /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - @override - @JsonKey(includeIfNull: false) - final String? 
output; - - @override - String toString() { - return 'RunStepDeltaStepDetailsToolCallsFunction(name: $name, arguments: $arguments, output: $output)'; - } - - @override - bool operator ==(Object other) { - return identical(this, other) || - (other.runtimeType == runtimeType && - other is _$RunStepDeltaStepDetailsToolCallsFunctionImpl && - (identical(other.name, name) || other.name == name) && - (identical(other.arguments, arguments) || - other.arguments == arguments) && - (identical(other.output, output) || other.output == output)); - } - - @JsonKey(ignore: true) - @override - int get hashCode => Object.hash(runtimeType, name, arguments, output); - - @JsonKey(ignore: true) - @override - @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsFunctionImpl>(this, _$identity); - - @override - Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsFunctionImplToJson( - this, - ); - } -} - -abstract class _RunStepDeltaStepDetailsToolCallsFunction - extends RunStepDeltaStepDetailsToolCallsFunction { - const factory _RunStepDeltaStepDetailsToolCallsFunction( - {@JsonKey(includeIfNull: false) final String? name, - @JsonKey(includeIfNull: false) final String? arguments, - @JsonKey(includeIfNull: false) final String? output}) = - _$RunStepDeltaStepDetailsToolCallsFunctionImpl; - const _RunStepDeltaStepDetailsToolCallsFunction._() : super._(); - - factory _RunStepDeltaStepDetailsToolCallsFunction.fromJson( - Map json) = - _$RunStepDeltaStepDetailsToolCallsFunctionImpl.fromJson; - - @override - - /// The name of the function. - @JsonKey(includeIfNull: false) - String? get name; - @override - - /// The arguments passed to the function. + /// Code interpreter image output. @JsonKey(includeIfNull: false) - String? get arguments; - @override + RunStepDeltaStepDetailsToolCallsCodeOutputImage? get image; - /// The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. - @JsonKey(includeIfNull: false) - String? get output; + /// Create a copy of RunStepDeltaStepDetailsToolCallsCodeOutput + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsFunctionImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsFunctionImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< + _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDetailsToolCallsCodeOutput _$RunStepDetailsToolCallsCodeOutputFromJson( +ChunkingStrategyRequestParam _$ChunkingStrategyRequestParamFromJson( Map json) { switch (json['type']) { - case 'logs': - return RunStepDetailsToolCallsCodeOutputLogsObject.fromJson(json); - case 'image': - return RunStepDetailsToolCallsCodeOutputImageObject.fromJson(json); + case 'auto': + return AutoChunkingStrategyRequestParam.fromJson(json); + case 'static': + return StaticChunkingStrategyRequestParam.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'RunStepDetailsToolCallsCodeOutput', + 'ChunkingStrategyRequestParam', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDetailsToolCallsCodeOutput { - /// Always `logs`. 
+mixin _$ChunkingStrategyRequestParam { + /// Always `auto`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function(String type, String logs) logs, - required TResult Function( - String type, RunStepDetailsToolCallsCodeOutputImage image) - image, + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, String logs)? logs, - TResult? Function( - String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? static, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, String logs)? logs, - TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDetailsToolCallsCodeOutputImageObject value) - image, + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChunkingStrategyRequestParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDetailsToolCallsCodeOutputCopyWith + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $ChunkingStrategyRequestParamCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory $RunStepDetailsToolCallsCodeOutputCopyWith( - RunStepDetailsToolCallsCodeOutput value, - $Res Function(RunStepDetailsToolCallsCodeOutput) then) = - _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - RunStepDetailsToolCallsCodeOutput>; +abstract class $ChunkingStrategyRequestParamCopyWith<$Res> { + factory $ChunkingStrategyRequestParamCopyWith( + ChunkingStrategyRequestParam value, + $Res Function(ChunkingStrategyRequestParam) then) = + _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + ChunkingStrategyRequestParam>; @useResult $Res call({String type}); } /// @nodoc -class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - $Val extends RunStepDetailsToolCallsCodeOutput> - implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - _$RunStepDetailsToolCallsCodeOutputCopyWithImpl(this._value, this._then); +class _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + $Val extends ChunkingStrategyRequestParam> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + _$ChunkingStrategyRequestParamCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -60576,128 +69359,113 @@ class _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) - then) = - __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res>; +abstract class _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + factory _$$AutoChunkingStrategyRequestParamImplCopyWith( + _$AutoChunkingStrategyRequestParamImpl value, + $Res Function(_$AutoChunkingStrategyRequestParamImpl) then) = + __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, String logs}); + $Res call({String type}); } /// @nodoc -class __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> - implements - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl) _then) +class __$$AutoChunkingStrategyRequestParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + _$AutoChunkingStrategyRequestParamImpl> + implements _$$AutoChunkingStrategyRequestParamImplCopyWith<$Res> { + __$$AutoChunkingStrategyRequestParamImplCopyWithImpl( + _$AutoChunkingStrategyRequestParamImpl _value, + $Res Function(_$AutoChunkingStrategyRequestParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced 
by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? logs = null, }) { - return _then(_$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( + return _then(_$AutoChunkingStrategyRequestParamImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - logs: null == logs - ? _value.logs - : logs // ignore: cast_nullable_to_non_nullable - as String, )); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl - extends RunStepDetailsToolCallsCodeOutputLogsObject { - const _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl( - {required this.type, required this.logs}) +class _$AutoChunkingStrategyRequestParamImpl + extends AutoChunkingStrategyRequestParam { + const _$AutoChunkingStrategyRequestParamImpl({required this.type}) : super._(); - factory _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( + factory _$AutoChunkingStrategyRequestParamImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); + _$$AutoChunkingStrategyRequestParamImplFromJson(json); - /// Always `logs`. + /// Always `auto`. @override final String type; - /// The text output from the Code Interpreter tool call. - @override - final String logs; - @override String toString() { - return 'RunStepDetailsToolCallsCodeOutput.logs(type: $type, logs: $logs)'; + return 'ChunkingStrategyRequestParam.auto(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl && - (identical(other.type, type) || other.type == type) && - (identical(other.logs, logs) || other.logs == logs)); + other is _$AutoChunkingStrategyRequestParamImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, logs); + int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> - get copyWith => - __$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl>( - this, _$identity); + _$$AutoChunkingStrategyRequestParamImplCopyWith< + _$AutoChunkingStrategyRequestParamImpl> + get copyWith => __$$AutoChunkingStrategyRequestParamImplCopyWithImpl< + _$AutoChunkingStrategyRequestParamImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, String logs) logs, - required TResult Function( - String type, RunStepDetailsToolCallsCodeOutputImage image) - image, + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, }) { - return logs(type, this.logs); + return auto(type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, String logs)? logs, - TResult? Function( - String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? 
static, }) { - return logs?.call(type, this.logs); + return auto?.call(type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, String logs)? logs, - TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, required TResult orElse(), }) { - if (logs != null) { - return logs(type, this.logs); + if (auto != null) { + return auto(type); } return orElse(); } @@ -60705,205 +69473,195 @@ class _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDetailsToolCallsCodeOutputImageObject value) - image, + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, }) { - return logs(this); + return auto(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, }) { - return logs?.call(this); + return auto?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, required TResult orElse(), }) { - if (logs != null) { - return logs(this); + if (auto != null) { + return auto(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplToJson( + return _$$AutoChunkingStrategyRequestParamImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsCodeOutputLogsObject - extends RunStepDetailsToolCallsCodeOutput { - const factory RunStepDetailsToolCallsCodeOutputLogsObject( - {required final String type, required final String logs}) = - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl; - const RunStepDetailsToolCallsCodeOutputLogsObject._() : super._(); +abstract class AutoChunkingStrategyRequestParam + extends ChunkingStrategyRequestParam { + const factory AutoChunkingStrategyRequestParam({required final String type}) = + _$AutoChunkingStrategyRequestParamImpl; + const AutoChunkingStrategyRequestParam._() : super._(); - factory RunStepDetailsToolCallsCodeOutputLogsObject.fromJson( - Map json) = - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; + factory AutoChunkingStrategyRequestParam.fromJson(Map json) = + _$AutoChunkingStrategyRequestParamImpl.fromJson; + /// Always `auto`. @override - - /// Always `logs`. String get type; - /// The text output from the Code Interpreter tool call. - String get logs; + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputLogsObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AutoChunkingStrategyRequestParamImplCopyWith< + _$AutoChunkingStrategyRequestParamImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> - implements $RunStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) - then) = - __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res>; +abstract class _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> + implements $ChunkingStrategyRequestParamCopyWith<$Res> { + factory _$$StaticChunkingStrategyRequestParamImplCopyWith( + _$StaticChunkingStrategyRequestParamImpl value, + $Res Function(_$StaticChunkingStrategyRequestParamImpl) then) = + __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res>; @override @useResult - $Res call({String type, RunStepDetailsToolCallsCodeOutputImage image}); + $Res call({String type, StaticChunkingStrategy static}); - $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image; + $StaticChunkingStrategyCopyWith<$Res> get static; } /// @nodoc -class __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl<$Res> - extends _$RunStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> - implements - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith<$Res> { - __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl _value, - $Res Function(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl) _then) +class __$$StaticChunkingStrategyRequestParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyRequestParamCopyWithImpl<$Res, + _$StaticChunkingStrategyRequestParamImpl> + implements _$$StaticChunkingStrategyRequestParamImplCopyWith<$Res> { + __$$StaticChunkingStrategyRequestParamImplCopyWithImpl( + _$StaticChunkingStrategyRequestParamImpl _value, + $Res Function(_$StaticChunkingStrategyRequestParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? type = null, - Object? image = null, + Object? static = null, }) { - return _then(_$RunStepDetailsToolCallsCodeOutputImageObjectImpl( + return _then(_$StaticChunkingStrategyRequestParamImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - image: null == image - ? _value.image - : image // ignore: cast_nullable_to_non_nullable - as RunStepDetailsToolCallsCodeOutputImage, + static: null == static + ? _value.static + : static // ignore: cast_nullable_to_non_nullable + as StaticChunkingStrategy, )); } + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') - $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res> get image { - return $RunStepDetailsToolCallsCodeOutputImageCopyWith<$Res>(_value.image, - (value) { - return _then(_value.copyWith(image: value)); + $StaticChunkingStrategyCopyWith<$Res> get static { + return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { + return _then(_value.copyWith(static: value)); }); } } /// @nodoc @JsonSerializable() -class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl - extends RunStepDetailsToolCallsCodeOutputImageObject { - const _$RunStepDetailsToolCallsCodeOutputImageObjectImpl( - {required this.type, required this.image}) +class _$StaticChunkingStrategyRequestParamImpl + extends StaticChunkingStrategyRequestParam { + const _$StaticChunkingStrategyRequestParamImpl( + {required this.type, required this.static}) : super._(); - factory _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( + factory _$StaticChunkingStrategyRequestParamImpl.fromJson( Map json) => - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplFromJson(json); + _$$StaticChunkingStrategyRequestParamImplFromJson(json); - /// Always `image`. + /// Always `static`. @override final String type; - /// Code interpreter image output. + /// Static chunking strategy @override - final RunStepDetailsToolCallsCodeOutputImage image; + final StaticChunkingStrategy static; @override String toString() { - return 'RunStepDetailsToolCallsCodeOutput.image(type: $type, image: $image)'; + return 'ChunkingStrategyRequestParam.static(type: $type, static: $static)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$RunStepDetailsToolCallsCodeOutputImageObjectImpl && + other is _$StaticChunkingStrategyRequestParamImpl && (identical(other.type, type) || other.type == type) && - (identical(other.image, image) || other.image == image)); + (identical(other.static, static) || other.static == static)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, type, image); + int get hashCode => Object.hash(runtimeType, type, static); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> - get copyWith => - __$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl>( - this, _$identity); + _$$StaticChunkingStrategyRequestParamImplCopyWith< + _$StaticChunkingStrategyRequestParamImpl> + get copyWith => __$$StaticChunkingStrategyRequestParamImplCopyWithImpl< + _$StaticChunkingStrategyRequestParamImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function(String type, String logs) logs, - required TResult Function( - String type, RunStepDetailsToolCallsCodeOutputImage image) - image, + required TResult Function(String type) auto, + required TResult Function(String type, StaticChunkingStrategy static) + static, }) { - return image(type, this.image); + return static(type, this.static); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(String type, String logs)? logs, - TResult? 
Function( - String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult? Function(String type)? auto, + TResult? Function(String type, StaticChunkingStrategy static)? static, }) { - return image?.call(type, this.image); + return static?.call(type, this.static); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(String type, String logs)? logs, - TResult Function(String type, RunStepDetailsToolCallsCodeOutputImage image)? - image, + TResult Function(String type)? auto, + TResult Function(String type, StaticChunkingStrategy static)? static, required TResult orElse(), }) { - if (image != null) { - return image(type, this.image); + if (static != null) { + return static(type, this.static); } return orElse(); } @@ -60911,211 +69669,171 @@ class _$RunStepDetailsToolCallsCodeOutputImageObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDetailsToolCallsCodeOutputImageObject value) - image, + required TResult Function(AutoChunkingStrategyRequestParam value) auto, + required TResult Function(StaticChunkingStrategyRequestParam value) static, }) { - return image(this); + return static(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult? Function(RunStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(AutoChunkingStrategyRequestParam value)? auto, + TResult? Function(StaticChunkingStrategyRequestParam value)? static, }) { - return image?.call(this); + return static?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function(RunStepDetailsToolCallsCodeOutputLogsObject value)? logs, - TResult Function(RunStepDetailsToolCallsCodeOutputImageObject value)? image, + TResult Function(AutoChunkingStrategyRequestParam value)? auto, + TResult Function(StaticChunkingStrategyRequestParam value)? static, required TResult orElse(), }) { - if (image != null) { - return image(this); + if (static != null) { + return static(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDetailsToolCallsCodeOutputImageObjectImplToJson( + return _$$StaticChunkingStrategyRequestParamImplToJson( this, ); } } -abstract class RunStepDetailsToolCallsCodeOutputImageObject - extends RunStepDetailsToolCallsCodeOutput { - const factory RunStepDetailsToolCallsCodeOutputImageObject( +abstract class StaticChunkingStrategyRequestParam + extends ChunkingStrategyRequestParam { + const factory StaticChunkingStrategyRequestParam( {required final String type, - required final RunStepDetailsToolCallsCodeOutputImage image}) = - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl; - const RunStepDetailsToolCallsCodeOutputImageObject._() : super._(); + required final StaticChunkingStrategy static}) = + _$StaticChunkingStrategyRequestParamImpl; + const StaticChunkingStrategyRequestParam._() : super._(); - factory RunStepDetailsToolCallsCodeOutputImageObject.fromJson( + factory StaticChunkingStrategyRequestParam.fromJson( Map json) = - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; + _$StaticChunkingStrategyRequestParamImpl.fromJson; + /// Always `static`. @override - - /// Always `image`. String get type; - /// Code interpreter image output. 
- RunStepDetailsToolCallsCodeOutputImage get image; + /// Static chunking strategy + StaticChunkingStrategy get static; + + /// Create a copy of ChunkingStrategyRequestParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDetailsToolCallsCodeOutputImageObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$StaticChunkingStrategyRequestParamImplCopyWith< + _$StaticChunkingStrategyRequestParamImpl> get copyWith => throw _privateConstructorUsedError; } -RunStepDeltaStepDetailsToolCallsCodeOutput - _$RunStepDeltaStepDetailsToolCallsCodeOutputFromJson( - Map json) { +ChunkingStrategyResponseParam _$ChunkingStrategyResponseParamFromJson( + Map json) { switch (json['type']) { - case 'logs': - return RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( - json); - case 'image': - return RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( - json); + case 'static': + return StaticChunkingStrategyResponseParam.fromJson(json); + case 'other': + return OtherChunkingStrategyResponseParam.fromJson(json); default: throw CheckedFromJsonException( json, 'type', - 'RunStepDeltaStepDetailsToolCallsCodeOutput', + 'ChunkingStrategyResponseParam', 'Invalid union type "${json['type']}"!'); } } /// @nodoc -mixin _$RunStepDeltaStepDetailsToolCallsCodeOutput { - /// The index of the output in the outputs array. - int get index => throw _privateConstructorUsedError; - - /// Always `logs`. +mixin _$ChunkingStrategyResponseParam { + /// Always `static`. String get type => throw _privateConstructorUsedError; @optionalTypeArgs TResult when({ - required TResult Function( - int index, String type, @JsonKey(includeIfNull: false) String? logs) - logs, - required TResult Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) - image, + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult? Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeWhen({ - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, required TResult orElse(), }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult map({ - required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) - image, + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult? mapOrNull({ - TResult? 
Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, }) => throw _privateConstructorUsedError; @optionalTypeArgs TResult maybeMap({ - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this ChunkingStrategyResponseParam to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith< - RunStepDeltaStepDetailsToolCallsCodeOutput> + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $ChunkingStrategyResponseParamCopyWith get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith( - RunStepDeltaStepDetailsToolCallsCodeOutput value, - $Res Function(RunStepDeltaStepDetailsToolCallsCodeOutput) then) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - RunStepDeltaStepDetailsToolCallsCodeOutput>; +abstract class $ChunkingStrategyResponseParamCopyWith<$Res> { + factory $ChunkingStrategyResponseParamCopyWith( + ChunkingStrategyResponseParam value, + $Res Function(ChunkingStrategyResponseParam) then) = + _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + ChunkingStrategyResponseParam>; @useResult - $Res call({int index, String type}); + $Res call({String type}); } /// @nodoc -class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - $Val extends RunStepDeltaStepDetailsToolCallsCodeOutput> - implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl( - this._value, this._then); +class _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + $Val extends ChunkingStrategyResponseParam> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + _$ChunkingStrategyResponseParamCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, }) { return _then(_value.copyWith( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, type: null == type ? 
_value.type : type // ignore: cast_nullable_to_non_nullable @@ -61125,164 +69843,136 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl value, - $Res Function( - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) - then) = - __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - $Res>; +abstract class _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + factory _$$StaticChunkingStrategyResponseParamImplCopyWith( + _$StaticChunkingStrategyResponseParamImpl value, + $Res Function(_$StaticChunkingStrategyResponseParamImpl) then) = + __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, String type, @JsonKey(includeIfNull: false) String? logs}); + $Res call({String type, StaticChunkingStrategy static}); + + $StaticChunkingStrategyCopyWith<$Res> get static; } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - $Res> - extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> - implements - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - $Res> { - __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl) - _then) +class __$$StaticChunkingStrategyResponseParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + _$StaticChunkingStrategyResponseParamImpl> + implements _$$StaticChunkingStrategyResponseParamImplCopyWith<$Res> { + __$$StaticChunkingStrategyResponseParamImplCopyWithImpl( + _$StaticChunkingStrategyResponseParamImpl _value, + $Res Function(_$StaticChunkingStrategyResponseParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? logs = freezed, + Object? static = null, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$StaticChunkingStrategyResponseParamImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - logs: freezed == logs - ? _value.logs - : logs // ignore: cast_nullable_to_non_nullable - as String?, + static: null == static + ? _value.static + : static // ignore: cast_nullable_to_non_nullable + as StaticChunkingStrategy, )); } + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. 
+ @override + @pragma('vm:prefer-inline') + $StaticChunkingStrategyCopyWith<$Res> get static { + return $StaticChunkingStrategyCopyWith<$Res>(_value.static, (value) { + return _then(_value.copyWith(static: value)); + }); + } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl - extends RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject { - const _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.logs}) +class _$StaticChunkingStrategyResponseParamImpl + extends StaticChunkingStrategyResponseParam { + const _$StaticChunkingStrategyResponseParamImpl( + {required this.type, required this.static}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson( + factory _$StaticChunkingStrategyResponseParamImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson(json); - - /// The index of the output in the outputs array. - @override - final int index; + _$$StaticChunkingStrategyResponseParamImplFromJson(json); - /// Always `logs`. + /// Always `static`. @override final String type; - /// The text output from the Code Interpreter tool call. + /// Static chunking strategy @override - @JsonKey(includeIfNull: false) - final String? logs; + final StaticChunkingStrategy static; @override String toString() { - return 'RunStepDeltaStepDetailsToolCallsCodeOutput.logs(index: $index, type: $type, logs: $logs)'; + return 'ChunkingStrategyResponseParam.static(type: $type, static: $static)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl && - (identical(other.index, index) || other.index == index) && + other is _$StaticChunkingStrategyResponseParamImpl && (identical(other.type, type) || other.type == type) && - (identical(other.logs, logs) || other.logs == logs)); + (identical(other.static, static) || other.static == static)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, index, type, logs); + int get hashCode => Object.hash(runtimeType, type, static); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl>( - this, _$identity); + _$$StaticChunkingStrategyResponseParamImplCopyWith< + _$StaticChunkingStrategyResponseParamImpl> + get copyWith => __$$StaticChunkingStrategyResponseParamImplCopyWithImpl< + _$StaticChunkingStrategyResponseParamImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - int index, String type, @JsonKey(includeIfNull: false) String? logs) - logs, - required TResult Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? 
image) - image, + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, }) { - return logs(index, type, this.logs); + return static(type, this.static); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult? Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, }) { - return logs?.call(index, type, this.logs); + return static?.call(type, this.static); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? other, required TResult orElse(), }) { - if (logs != null) { - return logs(index, type, this.logs); + if (static != null) { + return static(type, this.static); } return orElse(); } @@ -61290,264 +69980,178 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) - image, + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, }) { - return logs(this); + return static(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, }) { - return logs?.call(this); + return static?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, required TResult orElse(), }) { - if (logs != null) { - return logs(this); + if (static != null) { + return static(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplToJson( + return _$$StaticChunkingStrategyResponseParamImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject - extends RunStepDeltaStepDetailsToolCallsCodeOutput { - const factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) final String? 
logs}) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl; - const RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject._() : super._(); +abstract class StaticChunkingStrategyResponseParam + extends ChunkingStrategyResponseParam { + const factory StaticChunkingStrategyResponseParam( + {required final String type, + required final StaticChunkingStrategy static}) = + _$StaticChunkingStrategyResponseParamImpl; + const StaticChunkingStrategyResponseParam._() : super._(); - factory RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject.fromJson( + factory StaticChunkingStrategyResponseParam.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl.fromJson; + _$StaticChunkingStrategyResponseParamImpl.fromJson; + /// Always `static`. @override - - /// The index of the output in the outputs array. - int get index; - @override - - /// Always `logs`. String get type; - /// The text output from the Code Interpreter tool call. - @JsonKey(includeIfNull: false) - String? get logs; + /// Static chunking strategy + StaticChunkingStrategy get static; + + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$StaticChunkingStrategyResponseParamImplCopyWith< + _$StaticChunkingStrategyResponseParamImpl> get copyWith => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - $Res> implements $RunStepDeltaStepDetailsToolCallsCodeOutputCopyWith<$Res> { - factory _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith( - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl value, - $Res Function( - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) - then) = - __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - $Res>; +abstract class _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> + implements $ChunkingStrategyResponseParamCopyWith<$Res> { + factory _$$OtherChunkingStrategyResponseParamImplCopyWith( + _$OtherChunkingStrategyResponseParamImpl value, + $Res Function(_$OtherChunkingStrategyResponseParamImpl) then) = + __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res>; @override @useResult - $Res call( - {int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}); - - $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? 
get image; + $Res call({String type}); } /// @nodoc -class __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - $Res> - extends _$RunStepDeltaStepDetailsToolCallsCodeOutputCopyWithImpl<$Res, - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> - implements - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - $Res> { - __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl( - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _value, - $Res Function(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl) - _then) +class __$$OtherChunkingStrategyResponseParamImplCopyWithImpl<$Res> + extends _$ChunkingStrategyResponseParamCopyWithImpl<$Res, + _$OtherChunkingStrategyResponseParamImpl> + implements _$$OtherChunkingStrategyResponseParamImplCopyWith<$Res> { + __$$OtherChunkingStrategyResponseParamImplCopyWithImpl( + _$OtherChunkingStrategyResponseParamImpl _value, + $Res Function(_$OtherChunkingStrategyResponseParamImpl) _then) : super(_value, _then); + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ - Object? index = null, Object? type = null, - Object? image = freezed, }) { - return _then(_$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( - index: null == index - ? _value.index - : index // ignore: cast_nullable_to_non_nullable - as int, + return _then(_$OtherChunkingStrategyResponseParamImpl( type: null == type ? _value.type : type // ignore: cast_nullable_to_non_nullable as String, - image: freezed == image - ? _value.image - : image // ignore: cast_nullable_to_non_nullable - as RunStepDeltaStepDetailsToolCallsCodeOutputImage?, )); } - - @override - @pragma('vm:prefer-inline') - $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>? get image { - if (_value.image == null) { - return null; - } - - return $RunStepDeltaStepDetailsToolCallsCodeOutputImageCopyWith<$Res>( - _value.image!, (value) { - return _then(_value.copyWith(image: value)); - }); - } } /// @nodoc @JsonSerializable() -class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl - extends RunStepDeltaStepDetailsToolCallsCodeOutputImageObject { - const _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( - {required this.index, - required this.type, - @JsonKey(includeIfNull: false) this.image}) +class _$OtherChunkingStrategyResponseParamImpl + extends OtherChunkingStrategyResponseParam { + const _$OtherChunkingStrategyResponseParamImpl({required this.type}) : super._(); - factory _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson( + factory _$OtherChunkingStrategyResponseParamImpl.fromJson( Map json) => - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( - json); - - /// The index of the output in the outputs array. - @override - final int index; + _$$OtherChunkingStrategyResponseParamImplFromJson(json); - /// Always `image`. + /// Always `other`. @override final String type; - /// Code interpreter image output. - @override - @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeOutputImage? 
image; - @override String toString() { - return 'RunStepDeltaStepDetailsToolCallsCodeOutput.image(index: $index, type: $type, image: $image)'; + return 'ChunkingStrategyResponseParam.other(type: $type)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other - is _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl && - (identical(other.index, index) || other.index == index) && - (identical(other.type, type) || other.type == type) && - (identical(other.image, image) || other.image == image)); + other is _$OtherChunkingStrategyResponseParamImpl && + (identical(other.type, type) || other.type == type)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override - int get hashCode => Object.hash(runtimeType, index, type, image); + int get hashCode => Object.hash(runtimeType, type); - @JsonKey(ignore: true) + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> - get copyWith => - __$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWithImpl< - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl>( - this, _$identity); + _$$OtherChunkingStrategyResponseParamImplCopyWith< + _$OtherChunkingStrategyResponseParamImpl> + get copyWith => __$$OtherChunkingStrategyResponseParamImplCopyWithImpl< + _$OtherChunkingStrategyResponseParamImpl>(this, _$identity); @override @optionalTypeArgs TResult when({ - required TResult Function( - int index, String type, @JsonKey(includeIfNull: false) String? logs) - logs, - required TResult Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image) - image, + required TResult Function(String type, StaticChunkingStrategy static) + static, + required TResult Function(String type) other, }) { - return image(index, type, this.image); + return other(type); } @override @optionalTypeArgs TResult? whenOrNull({ - TResult? Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult? Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + TResult? Function(String type, StaticChunkingStrategy static)? static, + TResult? Function(String type)? other, }) { - return image?.call(index, type, this.image); + return other?.call(type); } @override @optionalTypeArgs TResult maybeWhen({ - TResult Function(int index, String type, - @JsonKey(includeIfNull: false) String? logs)? - logs, - TResult Function( - int index, - String type, - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? image)? - image, + TResult Function(String type, StaticChunkingStrategy static)? static, + TResult Function(String type)? 
other, required TResult orElse(), }) { - if (image != null) { - return image(index, type, this.image); + if (other != null) { + return other(type); } return orElse(); } @@ -61555,84 +70159,62 @@ class _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl @override @optionalTypeArgs TResult map({ - required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value) - logs, - required TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value) - image, + required TResult Function(StaticChunkingStrategyResponseParam value) static, + required TResult Function(OtherChunkingStrategyResponseParam value) other, }) { - return image(this); + return other(this); } @override @optionalTypeArgs TResult? mapOrNull({ - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult? Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult? Function(StaticChunkingStrategyResponseParam value)? static, + TResult? Function(OtherChunkingStrategyResponseParam value)? other, }) { - return image?.call(this); + return other?.call(this); } @override @optionalTypeArgs TResult maybeMap({ - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject value)? - logs, - TResult Function( - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject value)? - image, + TResult Function(StaticChunkingStrategyResponseParam value)? static, + TResult Function(OtherChunkingStrategyResponseParam value)? other, required TResult orElse(), }) { - if (image != null) { - return image(this); + if (other != null) { + return other(this); } return orElse(); } @override Map toJson() { - return _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplToJson( + return _$$OtherChunkingStrategyResponseParamImplToJson( this, ); } } -abstract class RunStepDeltaStepDetailsToolCallsCodeOutputImageObject - extends RunStepDeltaStepDetailsToolCallsCodeOutput { - const factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject( - {required final int index, - required final String type, - @JsonKey(includeIfNull: false) - final RunStepDeltaStepDetailsToolCallsCodeOutputImage? image}) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl; - const RunStepDeltaStepDetailsToolCallsCodeOutputImageObject._() : super._(); +abstract class OtherChunkingStrategyResponseParam + extends ChunkingStrategyResponseParam { + const factory OtherChunkingStrategyResponseParam( + {required final String type}) = _$OtherChunkingStrategyResponseParamImpl; + const OtherChunkingStrategyResponseParam._() : super._(); - factory RunStepDeltaStepDetailsToolCallsCodeOutputImageObject.fromJson( + factory OtherChunkingStrategyResponseParam.fromJson( Map json) = - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl.fromJson; - - @override + _$OtherChunkingStrategyResponseParamImpl.fromJson; - /// The index of the output in the outputs array. - int get index; + /// Always `other`. @override - - /// Always `image`. String get type; - /// Code interpreter image output. - @JsonKey(includeIfNull: false) - RunStepDeltaStepDetailsToolCallsCodeOutputImage? get image; + /// Create a copy of ChunkingStrategyResponseParam + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) - _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplCopyWith< - _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl> + @JsonKey(includeFromJson: false, includeToJson: false) + _$$OtherChunkingStrategyResponseParamImplCopyWith< + _$OtherChunkingStrategyResponseParamImpl> get copyWith => throw _privateConstructorUsedError; } @@ -61753,8 +70335,13 @@ mixin _$AssistantStreamEvent { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this AssistantStreamEvent to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $AssistantStreamEventCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -61779,6 +70366,8 @@ class _$AssistantStreamEventCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61814,6 +70403,8 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> $Res Function(_$ThreadStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -61832,6 +70423,8 @@ class __$$ThreadStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ThreadObjectCopyWith<$Res> get data { @@ -61872,11 +70465,13 @@ class _$ThreadStreamEventImpl extends ThreadStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => @@ -62009,16 +70604,18 @@ abstract class ThreadStreamEvent extends AssistantStreamEvent { factory ThreadStreamEvent.fromJson(Map json) = _$ThreadStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages). + @override ThreadObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ThreadStreamEventImplCopyWith<_$ThreadStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -62044,6 +70641,8 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> _$RunStreamEventImpl _value, $Res Function(_$RunStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -62062,6 +70661,8 @@ class __$$RunStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunObjectCopyWith<$Res> get data { @@ -62102,11 +70703,13 @@ class _$RunStreamEventImpl extends RunStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => @@ -62239,16 +70842,18 @@ abstract class RunStreamEvent extends AssistantStreamEvent { factory RunStreamEvent.fromJson(Map json) = _$RunStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). + @override RunObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStreamEventImplCopyWith<_$RunStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -62274,6 +70879,8 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62292,6 +70899,8 @@ class __$$RunStepStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepObjectCopyWith<$Res> get data { @@ -62332,11 +70941,13 @@ class _$RunStepStreamEventImpl extends RunStepStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => @@ -62469,16 +71080,18 @@ abstract class RunStepStreamEvent extends AssistantStreamEvent { factory RunStepStreamEvent.fromJson(Map json) = _$RunStepStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a step in execution of a run. + @override RunStepObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepStreamEventImplCopyWith<_$RunStepStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -62507,6 +71120,8 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$RunStepStreamDeltaEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62525,6 +71140,8 @@ class __$$RunStepStreamDeltaEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RunStepDeltaObjectCopyWith<$Res> get data { @@ -62565,11 +71182,13 @@ class _$RunStepStreamDeltaEventImpl extends RunStepStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> @@ -62702,16 +71321,18 @@ abstract class RunStepStreamDeltaEvent extends AssistantStreamEvent { factory RunStepStreamDeltaEvent.fromJson(Map json) = _$RunStepStreamDeltaEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a run step delta i.e. any changed fields on a run step during streaming. + @override RunStepDeltaObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RunStepStreamDeltaEventImplCopyWith<_$RunStepStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -62737,6 +71358,8 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62755,6 +71378,8 @@ class __$$MessageStreamEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageObjectCopyWith<$Res> get data { @@ -62795,11 +71420,13 @@ class _$MessageStreamEventImpl extends MessageStreamEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => @@ -62932,16 +71559,18 @@ abstract class MessageStreamEvent extends AssistantStreamEvent { factory MessageStreamEvent.fromJson(Map json) = _$MessageStreamEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). + @override MessageObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageStreamEventImplCopyWith<_$MessageStreamEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -62970,6 +71599,8 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> $Res Function(_$MessageStreamDeltaEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -62988,6 +71619,8 @@ class __$$MessageStreamDeltaEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $MessageDeltaObjectCopyWith<$Res> get data { @@ -63028,11 +71661,13 @@ class _$MessageStreamDeltaEventImpl extends MessageStreamDeltaEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> @@ -63165,16 +71800,18 @@ abstract class MessageStreamDeltaEvent extends AssistantStreamEvent { factory MessageStreamDeltaEvent.fromJson(Map json) = _$MessageStreamDeltaEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents a message delta i.e. any changed fields on a message during streaming. + @override MessageDeltaObject get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$MessageStreamDeltaEventImplCopyWith<_$MessageStreamDeltaEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -63200,6 +71837,8 @@ class __$$ErrorEventImplCopyWithImpl<$Res> _$ErrorEventImpl _value, $Res Function(_$ErrorEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63218,6 +71857,8 @@ class __$$ErrorEventImplCopyWithImpl<$Res> )); } + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $ErrorCopyWith<$Res> get data { @@ -63257,11 +71898,13 @@ class _$ErrorEventImpl extends ErrorEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => @@ -63393,16 +72036,18 @@ abstract class ErrorEvent extends AssistantStreamEvent { factory ErrorEvent.fromJson(Map json) = _$ErrorEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// Represents an error that occurred during an API request. + @override Error get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ErrorEventImplCopyWith<_$ErrorEventImpl> get copyWith => throw _privateConstructorUsedError; } @@ -63426,6 +72071,8 @@ class __$$DoneEventImplCopyWithImpl<$Res> _$DoneEventImpl _value, $Res Function(_$DoneEventImpl) _then) : super(_value, _then); + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -63475,11 +72122,13 @@ class _$DoneEventImpl extends DoneEvent { (identical(other.data, data) || other.data == data)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, event, data); - @JsonKey(ignore: true) + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => @@ -63611,16 +72260,18 @@ abstract class DoneEvent extends AssistantStreamEvent { factory DoneEvent.fromJson(Map json) = _$DoneEventImpl.fromJson; - @override - /// The type of the event. - EventType get event; @override + EventType get event; /// No Description + @override String get data; + + /// Create a copy of AssistantStreamEvent + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DoneEventImplCopyWith<_$DoneEventImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 03a49b59..c57effb3 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -13,17 +13,17 @@ _$CreateCompletionRequestImpl _$$CreateCompletionRequestImplFromJson( _$CreateCompletionRequestImpl( model: const _CompletionModelConverter().fromJson(json['model']), prompt: const _CompletionPromptConverter().fromJson(json['prompt']), - bestOf: json['best_of'] as int?, + bestOf: (json['best_of'] as num?)?.toInt(), echo: json['echo'] as bool? ?? false, frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 
0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, e as int), + (k, e) => MapEntry(k, (e as num).toInt()), ), - logprobs: json['logprobs'] as int?, - maxTokens: json['max_tokens'] as int? ?? 16, - n: json['n'] as int? ?? 1, + logprobs: (json['logprobs'] as num?)?.toInt(), + maxTokens: (json['max_tokens'] as num?)?.toInt() ?? 16, + n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), stop: const _CompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? false, streamOptions: json['stream_options'] == null @@ -113,7 +113,8 @@ _$CompletionPromptListListIntImpl _$$CompletionPromptListListIntImplFromJson( Map json) => _$CompletionPromptListListIntImpl( (json['value'] as List) - .map((e) => (e as List).map((e) => e as int).toList()) + .map((e) => + (e as List).map((e) => (e as num).toInt()).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -128,7 +129,7 @@ Map _$$CompletionPromptListListIntImplToJson( _$CompletionPromptListIntImpl _$$CompletionPromptListIntImplFromJson( Map json) => _$CompletionPromptListIntImpl( - (json['value'] as List).map((e) => e as int).toList(), + (json['value'] as List).map((e) => (e as num).toInt()).toList(), $type: json['runtimeType'] as String?, ); @@ -202,7 +203,7 @@ _$CreateCompletionResponseImpl _$$CreateCompletionResponseImplFromJson( choices: (json['choices'] as List) .map((e) => CompletionChoice.fromJson(e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num).toInt(), model: json['model'] as String, systemFingerprint: json['system_fingerprint'] as String?, object: @@ -243,7 +244,7 @@ _$CompletionChoiceImpl _$$CompletionChoiceImplFromJson( finishReason: $enumDecodeNullable( _$CompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int, + index: (json['index'] as num).toInt(), logprobs: json['logprobs'] == null ? null : CompletionLogprobs.fromJson( @@ -270,7 +271,7 @@ _$CompletionLogprobsImpl _$$CompletionLogprobsImplFromJson( Map json) => _$CompletionLogprobsImpl( textOffset: (json['text_offset'] as List?) - ?.map((e) => e as int) + ?.map((e) => (e as num).toInt()) .toList(), tokenLogprobs: (json['token_logprobs'] as List?) ?.map((e) => (e as num?)?.toDouble()) @@ -310,18 +311,22 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, logitBias: (json['logit_bias'] as Map?)?.map( - (k, e) => MapEntry(k, e as int), + (k, e) => MapEntry(k, (e as num).toInt()), ), logprobs: json['logprobs'] as bool?, - topLogprobs: json['top_logprobs'] as int?, - maxTokens: json['max_tokens'] as int?, - n: json['n'] as int? ?? 1, + topLogprobs: (json['top_logprobs'] as num?)?.toInt(), + maxTokens: (json['max_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), + n: (json['n'] as num?)?.toInt() ?? 1, presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, responseFormat: json['response_format'] == null ? 
null - : ChatCompletionResponseFormat.fromJson( + : ResponseFormat.fromJson( json['response_format'] as Map), - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), + serviceTier: $enumDecodeNullable( + _$CreateChatCompletionRequestServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), stop: const _ChatCompletionStopConverter().fromJson(json['stop']), stream: json['stream'] as bool? ?? false, streamOptions: json['stream_options'] == null @@ -335,6 +340,7 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .toList(), toolChoice: const _ChatCompletionToolChoiceOptionConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool?, user: json['user'] as String?, functionCall: const _ChatCompletionFunctionCallConverter() .fromJson(json['function_call']), @@ -361,10 +367,13 @@ Map _$$CreateChatCompletionRequestImplToJson( writeNotNull('logprobs', instance.logprobs); writeNotNull('top_logprobs', instance.topLogprobs); writeNotNull('max_tokens', instance.maxTokens); + writeNotNull('max_completion_tokens', instance.maxCompletionTokens); writeNotNull('n', instance.n); writeNotNull('presence_penalty', instance.presencePenalty); writeNotNull('response_format', instance.responseFormat?.toJson()); writeNotNull('seed', instance.seed); + writeNotNull('service_tier', + _$CreateChatCompletionRequestServiceTierEnumMap[instance.serviceTier]); writeNotNull( 'stop', const _ChatCompletionStopConverter().toJson(instance.stop)); writeNotNull('stream', instance.stream); @@ -376,6 +385,7 @@ Map _$$CreateChatCompletionRequestImplToJson( 'tool_choice', const _ChatCompletionToolChoiceOptionConverter() .toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull('user', instance.user); writeNotNull( 'function_call', @@ -386,6 +396,11 @@ Map _$$CreateChatCompletionRequestImplToJson( return val; } +const _$CreateChatCompletionRequestServiceTierEnumMap = { + CreateChatCompletionRequestServiceTier.auto: 'auto', + CreateChatCompletionRequestServiceTier.vDefault: 'default', +}; + _$ChatCompletionModelEnumerationImpl _$$ChatCompletionModelEnumerationImplFromJson(Map json) => _$ChatCompletionModelEnumerationImpl( @@ -401,6 +416,7 @@ Map _$$ChatCompletionModelEnumerationImplToJson( }; const _$ChatCompletionModelsEnumMap = { + ChatCompletionModels.chatgpt4oLatest: 'chatgpt-4o-latest', ChatCompletionModels.gpt4: 'gpt-4', ChatCompletionModels.gpt432k: 'gpt-4-32k', ChatCompletionModels.gpt432k0314: 'gpt-4-32k-0314', @@ -415,6 +431,9 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt4VisionPreview: 'gpt-4-vision-preview', ChatCompletionModels.gpt4o: 'gpt-4o', ChatCompletionModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ChatCompletionModels.gpt4o20240806: 'gpt-4o-2024-08-06', + ChatCompletionModels.gpt4oMini: 'gpt-4o-mini', + ChatCompletionModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', ChatCompletionModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ChatCompletionModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -422,6 +441,10 @@ const _$ChatCompletionModelsEnumMap = { ChatCompletionModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', ChatCompletionModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', ChatCompletionModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + ChatCompletionModels.o1Mini: 'o1-mini', + ChatCompletionModels.o1Mini20240912: 'o1-mini-2024-09-12', + ChatCompletionModels.o1Preview: 'o1-preview', + 
ChatCompletionModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ChatCompletionModelStringImpl _$$ChatCompletionModelStringImplFromJson( @@ -438,25 +461,6 @@ Map _$$ChatCompletionModelStringImplToJson( 'runtimeType': instance.$type, }; -_$ChatCompletionResponseFormatImpl _$$ChatCompletionResponseFormatImplFromJson( - Map json) => - _$ChatCompletionResponseFormatImpl( - type: $enumDecodeNullable( - _$ChatCompletionResponseFormatTypeEnumMap, json['type']) ?? - ChatCompletionResponseFormatType.text, - ); - -Map _$$ChatCompletionResponseFormatImplToJson( - _$ChatCompletionResponseFormatImpl instance) => - { - 'type': _$ChatCompletionResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$ChatCompletionResponseFormatTypeEnumMap = { - ChatCompletionResponseFormatType.text: 'text', - ChatCompletionResponseFormatType.jsonObject: 'json_object', -}; - _$ChatCompletionStopListStringImpl _$$ChatCompletionStopListStringImplFromJson( Map json) => _$ChatCompletionStopListStringImpl( @@ -595,6 +599,7 @@ _$FunctionObjectImpl _$$FunctionObjectImplFromJson(Map json) => name: json['name'] as String, description: json['description'] as String?, parameters: json['parameters'] as Map?, + strict: json['strict'] as bool? ?? false, ); Map _$$FunctionObjectImplToJson( @@ -611,6 +616,34 @@ Map _$$FunctionObjectImplToJson( writeNotNull('description', instance.description); writeNotNull('parameters', instance.parameters); + writeNotNull('strict', instance.strict); + return val; +} + +_$JsonSchemaObjectImpl _$$JsonSchemaObjectImplFromJson( + Map json) => + _$JsonSchemaObjectImpl( + name: json['name'] as String, + description: json['description'] as String?, + schema: json['schema'] as Map, + strict: json['strict'] as bool? ?? false, + ); + +Map _$$JsonSchemaObjectImplToJson( + _$JsonSchemaObjectImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['schema'] = instance.schema; + writeNotNull('strict', instance.strict); return val; } @@ -703,8 +736,11 @@ _$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( .map((e) => ChatCompletionResponseChoice.fromJson(e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num).toInt(), model: json['model'] as String, + serviceTier: $enumDecodeNullable( + _$ServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, object: json['object'] as String, usage: json['usage'] == null @@ -726,19 +762,25 @@ Map _$$CreateChatCompletionResponseImplToJson( val['choices'] = instance.choices.map((e) => e.toJson()).toList(); val['created'] = instance.created; val['model'] = instance.model; + writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); val['object'] = instance.object; writeNotNull('usage', instance.usage?.toJson()); return val; } +const _$ServiceTierEnumMap = { + ServiceTier.scale: 'scale', + ServiceTier.vDefault: 'default', +}; + _$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( Map json) => _$ChatCompletionResponseChoiceImpl( finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), message: 
ChatCompletionAssistantMessage.fromJson( json['message'] as Map), logprobs: json['logprobs'] == null @@ -780,20 +822,35 @@ _$ChatCompletionLogprobsImpl _$$ChatCompletionLogprobsImplFromJson( ?.map((e) => ChatCompletionTokenLogprob.fromJson(e as Map)) .toList(), + refusal: (json['refusal'] as List?) + ?.map((e) => + ChatCompletionTokenLogprob.fromJson(e as Map)) + .toList(), ); Map _$$ChatCompletionLogprobsImplToJson( - _$ChatCompletionLogprobsImpl instance) => - { - 'content': instance.content?.map((e) => e.toJson()).toList(), - }; + _$ChatCompletionLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); + return val; +} _$ChatCompletionTokenLogprobImpl _$$ChatCompletionTokenLogprobImplFromJson( Map json) => _$ChatCompletionTokenLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: (json['bytes'] as List?)?.map((e) => e as int).toList(), + bytes: (json['bytes'] as List?) + ?.map((e) => (e as num).toInt()) + .toList(), topLogprobs: (json['top_logprobs'] as List) .map((e) => ChatCompletionTokenTopLogprob.fromJson(e as Map)) @@ -814,8 +871,9 @@ _$ChatCompletionTokenTopLogprobImpl _$ChatCompletionTokenTopLogprobImpl( token: json['token'] as String, logprob: (json['logprob'] as num).toDouble(), - bytes: - (json['bytes'] as List?)?.map((e) => e as int).toList(), + bytes: (json['bytes'] as List?) + ?.map((e) => (e as num).toInt()) + .toList(), ); Map _$$ChatCompletionTokenTopLogprobImplToJson( @@ -835,10 +893,13 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: json['created'] as int, + created: (json['created'] as num?)?.toInt(), model: json['model'] as String?, + serviceTier: $enumDecodeNullable( + _$ServiceTierEnumMap, json['service_tier'], + unknownValue: JsonKey.nullForUndefinedEnumValue), systemFingerprint: json['system_fingerprint'] as String?, - object: json['object'] as String, + object: json['object'] as String?, usage: json['usage'] == null ? null : CompletionUsage.fromJson(json['usage'] as Map), @@ -856,10 +917,11 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( writeNotNull('id', instance.id); val['choices'] = instance.choices.map((e) => e.toJson()).toList(); - val['created'] = instance.created; + writeNotNull('created', instance.created); writeNotNull('model', instance.model); + writeNotNull('service_tier', _$ServiceTierEnumMap[instance.serviceTier]); writeNotNull('system_fingerprint', instance.systemFingerprint); - val['object'] = instance.object; + writeNotNull('object', instance.object); writeNotNull('usage', instance.usage?.toJson()); return val; } @@ -877,7 +939,7 @@ _$ChatCompletionStreamResponseChoiceImpl finishReason: $enumDecodeNullable( _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], unknownValue: JsonKey.nullForUndefinedEnumValue), - index: json['index'] as int?, + index: (json['index'] as num?)?.toInt(), ); Map _$$ChatCompletionStreamResponseChoiceImplToJson( @@ -907,19 +969,33 @@ _$ChatCompletionStreamResponseChoiceLogprobsImpl ?.map((e) => ChatCompletionTokenLogprob.fromJson( e as Map)) .toList(), + refusal: (json['refusal'] as List?) 
+ ?.map((e) => ChatCompletionTokenLogprob.fromJson( + e as Map)) + .toList(), ); Map _$$ChatCompletionStreamResponseChoiceLogprobsImplToJson( - _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) => - { - 'content': instance.content?.map((e) => e.toJson()).toList(), - }; + _$ChatCompletionStreamResponseChoiceLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + writeNotNull('refusal', instance.refusal?.map((e) => e.toJson()).toList()); + return val; +} _$ChatCompletionStreamResponseDeltaImpl _$$ChatCompletionStreamResponseDeltaImplFromJson( Map json) => _$ChatCompletionStreamResponseDeltaImpl( content: json['content'] as String?, + refusal: json['refusal'] as String?, functionCall: json['function_call'] == null ? null : ChatCompletionStreamMessageFunctionCall.fromJson( @@ -944,6 +1020,7 @@ Map _$$ChatCompletionStreamResponseDeltaImplToJson( } writeNotNull('content', instance.content); + writeNotNull('refusal', instance.refusal); writeNotNull('function_call', instance.functionCall?.toJson()); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -986,7 +1063,7 @@ _$ChatCompletionStreamMessageToolCallChunkImpl _$$ChatCompletionStreamMessageToolCallChunkImplFromJson( Map json) => _$ChatCompletionStreamMessageToolCallChunkImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: $enumDecodeNullable( _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap, @@ -1024,18 +1101,53 @@ const _$ChatCompletionStreamMessageToolCallChunkTypeEnumMap = { _$CompletionUsageImpl _$$CompletionUsageImplFromJson( Map json) => _$CompletionUsageImpl( - completionTokens: json['completion_tokens'] as int?, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num?)?.toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), + completionTokensDetails: json['completion_tokens_details'] == null + ? 
null + : CompletionTokensDetails.fromJson( + json['completion_tokens_details'] as Map), ); Map _$$CompletionUsageImplToJson( - _$CompletionUsageImpl instance) => - { - 'completion_tokens': instance.completionTokens, - 'prompt_tokens': instance.promptTokens, - 'total_tokens': instance.totalTokens, - }; + _$CompletionUsageImpl instance) { + final val = { + 'completion_tokens': instance.completionTokens, + 'prompt_tokens': instance.promptTokens, + 'total_tokens': instance.totalTokens, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'completion_tokens_details', instance.completionTokensDetails?.toJson()); + return val; +} + +_$CompletionTokensDetailsImpl _$$CompletionTokensDetailsImplFromJson( + Map json) => + _$CompletionTokensDetailsImpl( + reasoningTokens: (json['reasoning_tokens'] as num?)?.toInt(), + ); + +Map _$$CompletionTokensDetailsImplToJson( + _$CompletionTokensDetailsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('reasoning_tokens', instance.reasoningTokens); + return val; +} _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( Map json) => @@ -1045,7 +1157,7 @@ _$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( encodingFormat: $enumDecodeNullable( _$EmbeddingEncodingFormatEnumMap, json['encoding_format']) ?? EmbeddingEncodingFormat.float, - dimensions: json['dimensions'] as int?, + dimensions: (json['dimensions'] as num?)?.toInt(), user: json['user'] as String?, ); @@ -1112,7 +1224,8 @@ _$EmbeddingInputListListIntImpl _$$EmbeddingInputListListIntImplFromJson( Map json) => _$EmbeddingInputListListIntImpl( (json['value'] as List) - .map((e) => (e as List).map((e) => e as int).toList()) + .map((e) => + (e as List).map((e) => (e as num).toInt()).toList()) .toList(), $type: json['runtimeType'] as String?, ); @@ -1127,7 +1240,7 @@ Map _$$EmbeddingInputListListIntImplToJson( _$EmbeddingInputListIntImpl _$$EmbeddingInputListIntImplFromJson( Map json) => _$EmbeddingInputListIntImpl( - (json['value'] as List).map((e) => e as int).toList(), + (json['value'] as List).map((e) => (e as num).toInt()).toList(), $type: json['runtimeType'] as String?, ); @@ -1204,7 +1317,7 @@ const _$CreateEmbeddingResponseObjectEnumMap = { _$EmbeddingImpl _$$EmbeddingImplFromJson(Map json) => _$EmbeddingImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), embedding: const _EmbeddingVectorConverter().fromJson(json['embedding']), object: $enumDecode(_$EmbeddingObjectEnumMap, json['object']), ); @@ -1252,8 +1365,8 @@ Map _$$EmbeddingVectorStringImplToJson( _$EmbeddingUsageImpl _$$EmbeddingUsageImplFromJson(Map json) => _$EmbeddingUsageImpl( - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$EmbeddingUsageImplToJson( @@ -1278,7 +1391,7 @@ _$CreateFineTuningJobRequestImpl _$$CreateFineTuningJobRequestImplFromJson( ?.map( (e) => FineTuningIntegration.fromJson(e as Map)) .toList(), - seed: json['seed'] as int?, + seed: (json['seed'] as num?)?.toInt(), ); Map _$$CreateFineTuningJobRequestImplToJson( @@ -1321,6 +1434,7 @@ const _$FineTuningModelsEnumMap = { FineTuningModels.babbage002: 'babbage-002', FineTuningModels.davinci002: 'davinci-002', FineTuningModels.gpt35Turbo: 'gpt-3.5-turbo', + FineTuningModels.gpt4oMini: 
'gpt-4o-mini', }; _$FineTuningModelStringImpl _$$FineTuningModelStringImplFromJson( @@ -1340,12 +1454,12 @@ Map _$$FineTuningModelStringImplToJson( _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => _$FineTuningJobImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), error: json['error'] == null ? null : FineTuningJobError.fromJson(json['error'] as Map), fineTunedModel: json['fine_tuned_model'] as String?, - finishedAt: json['finished_at'] as int?, + finishedAt: (json['finished_at'] as num?)?.toInt(), hyperparameters: FineTuningJobHyperparameters.fromJson( json['hyperparameters'] as Map), model: json['model'] as String, @@ -1355,7 +1469,7 @@ _$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => .map((e) => e as String) .toList(), status: $enumDecode(_$FineTuningJobStatusEnumMap, json['status']), - trainedTokens: json['trained_tokens'] as int?, + trainedTokens: (json['trained_tokens'] as num?)?.toInt(), trainingFile: json['training_file'] as String, validationFile: json['validation_file'] as String?, integrations: (json['integrations'] as List?) @@ -1501,7 +1615,7 @@ const _$FineTuningNEpochsOptionsEnumMap = { _$FineTuningNEpochsIntImpl _$$FineTuningNEpochsIntImplFromJson( Map json) => _$FineTuningNEpochsIntImpl( - json['value'] as int, + (json['value'] as num).toInt(), $type: json['runtimeType'] as String?, ); @@ -1605,7 +1719,7 @@ _$FineTuningJobEventImpl _$$FineTuningJobEventImplFromJson( Map json) => _$FineTuningJobEventImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), level: $enumDecode(_$FineTuningJobEventLevelEnumMap, json['level']), message: json['message'] as String, object: $enumDecode(_$FineTuningJobEventObjectEnumMap, json['object']), @@ -1635,9 +1749,9 @@ _$FineTuningJobCheckpointImpl _$$FineTuningJobCheckpointImplFromJson( Map json) => _$FineTuningJobCheckpointImpl( id: json['id'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), fineTunedModelCheckpoint: json['fine_tuned_model_checkpoint'] as String, - stepNumber: json['step_number'] as int, + stepNumber: (json['step_number'] as num).toInt(), metrics: FineTuningJobCheckpointMetrics.fromJson( json['metrics'] as Map), fineTuningJobId: json['fine_tuning_job_id'] as String, @@ -1705,7 +1819,7 @@ _$CreateImageRequestImpl _$$CreateImageRequestImplFromJson( model: json['model'] == null ? const CreateImageRequestModelString('dall-e-2') : const _CreateImageRequestModelConverter().fromJson(json['model']), - n: json['n'] as int? ?? 1, + n: (json['n'] as num?)?.toInt() ?? 1, quality: $enumDecodeNullable(_$ImageQualityEnumMap, json['quality']) ?? 
ImageQuality.standard, responseFormat: $enumDecodeNullable( @@ -1804,7 +1918,7 @@ Map _$$CreateImageRequestModelStringImplToJson( _$ImagesResponseImpl _$$ImagesResponseImplFromJson(Map json) => _$ImagesResponseImpl( - created: json['created'] as int, + created: (json['created'] as num).toInt(), data: (json['data'] as List) .map((e) => Image.fromJson(e as Map)) .toList(), @@ -1840,7 +1954,7 @@ Map _$$ImageImplToJson(_$ImageImpl instance) { _$ModelImpl _$$ModelImplFromJson(Map json) => _$ModelImpl( id: json['id'] as String, - created: json['created'] as int, + created: (json['created'] as num).toInt(), object: $enumDecode(_$ModelObjectEnumMap, json['object']), ownedBy: json['owned_by'] as String, ); @@ -2082,7 +2196,7 @@ _$AssistantObjectImpl _$$AssistantObjectImplFromJson( _$AssistantObjectImpl( id: json['id'] as String, object: $enumDecode(_$AssistantObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), name: json['name'] as String?, description: json['description'] as String?, model: json['model'] as String, @@ -2151,22 +2265,19 @@ Map _$$AssistantObjectResponseFormatEnumerationImplToJson( }; const _$AssistantResponseFormatModeEnumMap = { - AssistantResponseFormatMode.none: 'none', AssistantResponseFormatMode.auto: 'auto', }; -_$AssistantObjectResponseFormatAssistantsResponseFormatImpl - _$$AssistantObjectResponseFormatAssistantsResponseFormatImplFromJson( +_$AssistantObjectResponseFormatResponseFormatImpl + _$$AssistantObjectResponseFormatResponseFormatImplFromJson( Map json) => - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$AssistantObjectResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map _$$AssistantObjectResponseFormatAssistantsResponseFormatImplToJson( - _$AssistantObjectResponseFormatAssistantsResponseFormatImpl instance) => +Map _$$AssistantObjectResponseFormatResponseFormatImplToJson( + _$AssistantObjectResponseFormatResponseFormatImpl instance) => { 'value': instance.value.toJson(), 'runtimeType': instance.$type, @@ -2236,6 +2347,7 @@ Map _$$AssistantModelEnumerationImplToJson( }; const _$AssistantModelsEnumMap = { + AssistantModels.chatgpt4oLatest: 'chatgpt-4o-latest', AssistantModels.gpt4: 'gpt-4', AssistantModels.gpt432k: 'gpt-4-32k', AssistantModels.gpt432k0314: 'gpt-4-32k-0314', @@ -2250,6 +2362,9 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt4VisionPreview: 'gpt-4-vision-preview', AssistantModels.gpt4o: 'gpt-4o', AssistantModels.gpt4o20240513: 'gpt-4o-2024-05-13', + AssistantModels.gpt4o20240806: 'gpt-4o-2024-08-06', + AssistantModels.gpt4oMini: 'gpt-4o-mini', + AssistantModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', AssistantModels.gpt35Turbo: 'gpt-3.5-turbo', AssistantModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', AssistantModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2257,6 +2372,10 @@ const _$AssistantModelsEnumMap = { AssistantModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', AssistantModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', AssistantModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + AssistantModels.o1Mini: 'o1-mini', + AssistantModels.o1Mini20240912: 'o1-mini-2024-09-12', + AssistantModels.o1Preview: 'o1-preview', + AssistantModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$AssistantModelStringImpl _$$AssistantModelStringImplFromJson( @@ -2291,27 +2410,24 @@ Map }; const _$CreateAssistantResponseFormatModeEnumMap = 
{ - CreateAssistantResponseFormatMode.none: 'none', CreateAssistantResponseFormatMode.auto: 'auto', }; -_$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$CreateAssistantRequestResponseFormatResponseFormatImpl + _$$CreateAssistantRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateAssistantRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateAssistantRequestResponseFormatResponseFormatImplToJson( + _$CreateAssistantRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ModifyAssistantRequestImpl _$$ModifyAssistantRequestImplFromJson( Map json) => @@ -2384,27 +2500,24 @@ Map }; const _$ModifyAssistantResponseFormatModeEnumMap = { - ModifyAssistantResponseFormatMode.none: 'none', ModifyAssistantResponseFormatMode.auto: 'auto', }; -_$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$ModifyAssistantRequestResponseFormatResponseFormatImpl + _$$ModifyAssistantRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$ModifyAssistantRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( - _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$ModifyAssistantRequestResponseFormatResponseFormatImplToJson( + _$ModifyAssistantRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$DeleteAssistantResponseImpl _$$DeleteAssistantResponseImplFromJson( Map json) => @@ -2449,6 +2562,34 @@ Map _$$ListAssistantsResponseImplToJson( 'has_more': instance.hasMore, }; +_$FileSearchRankingOptionsImpl _$$FileSearchRankingOptionsImplFromJson( + Map json) => + _$FileSearchRankingOptionsImpl( + ranker: $enumDecodeNullable(_$FileSearchRankerEnumMap, json['ranker'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + scoreThreshold: (json['score_threshold'] as num).toDouble(), + ); + +Map _$$FileSearchRankingOptionsImplToJson( + _$FileSearchRankingOptionsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('ranker', _$FileSearchRankerEnumMap[instance.ranker]); + val['score_threshold'] = instance.scoreThreshold; + return val; +} + +const _$FileSearchRankerEnumMap = { + FileSearchRanker.auto: 'auto', + FileSearchRanker.default20240821: 'default_2024_08_21', +}; + _$AssistantsNamedToolChoiceImpl _$$AssistantsNamedToolChoiceImplFromJson( Map json) => _$AssistantsNamedToolChoiceImpl( @@ -2493,30 
+2634,11 @@ Map _$$AssistantsFunctionCallOptionImplToJson( 'name': instance.name, }; -_$AssistantsResponseFormatImpl _$$AssistantsResponseFormatImplFromJson( - Map json) => - _$AssistantsResponseFormatImpl( - type: $enumDecodeNullable( - _$AssistantsResponseFormatTypeEnumMap, json['type']) ?? - AssistantsResponseFormatType.text, - ); - -Map _$$AssistantsResponseFormatImplToJson( - _$AssistantsResponseFormatImpl instance) => - { - 'type': _$AssistantsResponseFormatTypeEnumMap[instance.type]!, - }; - -const _$AssistantsResponseFormatTypeEnumMap = { - AssistantsResponseFormatType.text: 'text', - AssistantsResponseFormatType.jsonObject: 'json_object', -}; - _$TruncationObjectImpl _$$TruncationObjectImplFromJson( Map json) => _$TruncationObjectImpl( type: $enumDecode(_$TruncationObjectTypeEnumMap, json['type']), - lastMessages: json['last_messages'] as int?, + lastMessages: (json['last_messages'] as num?)?.toInt(), ); Map _$$TruncationObjectImplToJson( @@ -2544,7 +2666,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => _$RunObjectImpl( id: json['id'] as String, object: $enumDecode(_$RunObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), threadId: json['thread_id'] as String, assistantId: json['assistant_id'] as String, status: $enumDecode(_$RunStatusEnumMap, json['status']), @@ -2555,11 +2677,11 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => lastError: json['last_error'] == null ? null : RunLastError.fromJson(json['last_error'] as Map), - expiresAt: json['expires_at'] as int?, - startedAt: json['started_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, - failedAt: json['failed_at'] as int?, - completedAt: json['completed_at'] as int?, + expiresAt: (json['expires_at'] as num?)?.toInt(), + startedAt: (json['started_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), incompleteDetails: json['incomplete_details'] == null ? null : RunObjectIncompleteDetails.fromJson( @@ -2575,14 +2697,15 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => : RunCompletionUsage.fromJson(json['usage'] as Map), temperature: (json['temperature'] as num?)?.toDouble(), topP: (json['top_p'] as num?)?.toDouble(), - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? 
null : TruncationObject.fromJson( json['truncation_strategy'] as Map), toolChoice: const _RunObjectToolChoiceConverter().fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _RunObjectResponseFormatConverter() .fromJson(json['response_format']), ); @@ -2623,6 +2746,7 @@ Map _$$RunObjectImplToJson(_$RunObjectImpl instance) { val['truncation_strategy'] = instance.truncationStrategy?.toJson(); val['tool_choice'] = _$JsonConverterToJson( instance.toolChoice, const _RunObjectToolChoiceConverter().toJson); + val['parallel_tool_calls'] = instance.parallelToolCalls; val['response_format'] = const _RunObjectResponseFormatConverter().toJson(instance.responseFormat); return val; @@ -2761,26 +2885,23 @@ Map _$$RunObjectResponseFormatEnumerationImplToJson( }; const _$RunObjectResponseFormatModeEnumMap = { - RunObjectResponseFormatMode.none: 'none', RunObjectResponseFormatMode.auto: 'auto', }; -_$RunObjectResponseFormatAssistantsResponseFormatImpl - _$$RunObjectResponseFormatAssistantsResponseFormatImplFromJson( +_$RunObjectResponseFormatResponseFormatImpl + _$$RunObjectResponseFormatResponseFormatImplFromJson( Map json) => - _$RunObjectResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$RunObjectResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$RunObjectResponseFormatAssistantsResponseFormatImplToJson( - _$RunObjectResponseFormatAssistantsResponseFormatImpl instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$RunObjectResponseFormatResponseFormatImplToJson( + _$RunObjectResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$RunSubmitToolOutputsImpl _$$RunSubmitToolOutputsImplFromJson( Map json) => @@ -2799,9 +2920,9 @@ Map _$$RunSubmitToolOutputsImplToJson( _$RunCompletionUsageImpl _$$RunCompletionUsageImplFromJson( Map json) => _$RunCompletionUsageImpl( - completionTokens: json['completion_tokens'] as int, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num).toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$RunCompletionUsageImplToJson( @@ -2828,14 +2949,15 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? 
null : TruncationObject.fromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -2868,6 +2990,7 @@ Map _$$CreateRunRequestImplToJson( writeNotNull('truncation_strategy', instance.truncationStrategy?.toJson()); writeNotNull('tool_choice', const _CreateRunRequestToolChoiceConverter().toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateRunRequestResponseFormatConverter() @@ -2892,6 +3015,7 @@ Map _$$CreateRunRequestModelEnumerationImplToJson( }; const _$RunModelsEnumMap = { + RunModels.chatgpt4oLatest: 'chatgpt-4o-latest', RunModels.gpt4: 'gpt-4', RunModels.gpt432k: 'gpt-4-32k', RunModels.gpt432k0314: 'gpt-4-32k-0314', @@ -2906,6 +3030,9 @@ const _$RunModelsEnumMap = { RunModels.gpt4VisionPreview: 'gpt-4-vision-preview', RunModels.gpt4o: 'gpt-4o', RunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + RunModels.gpt4o20240806: 'gpt-4o-2024-08-06', + RunModels.gpt4oMini: 'gpt-4o-mini', + RunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', RunModels.gpt35Turbo: 'gpt-3.5-turbo', RunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', RunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -2913,6 +3040,10 @@ const _$RunModelsEnumMap = { RunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', RunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', RunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + RunModels.o1Mini: 'o1-mini', + RunModels.o1Mini20240912: 'o1-mini-2024-09-12', + RunModels.o1Preview: 'o1-preview', + RunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$CreateRunRequestModelStringImpl _$$CreateRunRequestModelStringImplFromJson( @@ -2984,27 +3115,23 @@ Map _$$CreateRunRequestResponseFormatEnumerationImplToJson( }; const _$CreateRunRequestResponseFormatModeEnumMap = { - CreateRunRequestResponseFormatMode.none: 'none', CreateRunRequestResponseFormatMode.auto: 'auto', }; -_$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplFromJson( +_$CreateRunRequestResponseFormatResponseFormatImpl + _$$CreateRunRequestResponseFormatResponseFormatImplFromJson( Map json) => - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateRunRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateRunRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateRunRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateRunRequestResponseFormatResponseFormatImplToJson( + _$CreateRunRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ListRunsResponseImpl _$$ListRunsResponseImplFromJson( Map json) => @@ -3150,14 +3277,15 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, topP: (json['top_p'] as num?)?.toDouble() ?? 
1.0, - maxPromptTokens: json['max_prompt_tokens'] as int?, - maxCompletionTokens: json['max_completion_tokens'] as int?, + maxPromptTokens: (json['max_prompt_tokens'] as num?)?.toInt(), + maxCompletionTokens: (json['max_completion_tokens'] as num?)?.toInt(), truncationStrategy: json['truncation_strategy'] == null ? null : TruncationObject.fromJson( json['truncation_strategy'] as Map), toolChoice: const _CreateThreadAndRunRequestToolChoiceConverter() .fromJson(json['tool_choice']), + parallelToolCalls: json['parallel_tool_calls'] as bool?, responseFormat: const _CreateThreadAndRunRequestResponseFormatConverter() .fromJson(json['response_format']), stream: json['stream'] as bool?, @@ -3191,6 +3319,7 @@ Map _$$CreateThreadAndRunRequestImplToJson( 'tool_choice', const _CreateThreadAndRunRequestToolChoiceConverter() .toJson(instance.toolChoice)); + writeNotNull('parallel_tool_calls', instance.parallelToolCalls); writeNotNull( 'response_format', const _CreateThreadAndRunRequestResponseFormatConverter() @@ -3214,6 +3343,7 @@ Map _$$ThreadAndRunModelEnumerationImplToJson( }; const _$ThreadAndRunModelsEnumMap = { + ThreadAndRunModels.chatgpt4oLatest: 'chatgpt-4o-latest', ThreadAndRunModels.gpt4: 'gpt-4', ThreadAndRunModels.gpt432k: 'gpt-4-32k', ThreadAndRunModels.gpt432k0314: 'gpt-4-32k-0314', @@ -3228,6 +3358,9 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt4VisionPreview: 'gpt-4-vision-preview', ThreadAndRunModels.gpt4o: 'gpt-4o', ThreadAndRunModels.gpt4o20240513: 'gpt-4o-2024-05-13', + ThreadAndRunModels.gpt4o20240806: 'gpt-4o-2024-08-06', + ThreadAndRunModels.gpt4oMini: 'gpt-4o-mini', + ThreadAndRunModels.gpt4oMini20240718: 'gpt-4o-mini-2024-07-18', ThreadAndRunModels.gpt35Turbo: 'gpt-3.5-turbo', ThreadAndRunModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', ThreadAndRunModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', @@ -3235,6 +3368,10 @@ const _$ThreadAndRunModelsEnumMap = { ThreadAndRunModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', ThreadAndRunModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', ThreadAndRunModels.gpt35Turbo1106: 'gpt-3.5-turbo-1106', + ThreadAndRunModels.o1Mini: 'o1-mini', + ThreadAndRunModels.o1Mini20240912: 'o1-mini-2024-09-12', + ThreadAndRunModels.o1Preview: 'o1-preview', + ThreadAndRunModels.o1Preview20240912: 'o1-preview-2024-09-12', }; _$ThreadAndRunModelStringImpl _$$ThreadAndRunModelStringImplFromJson( @@ -3312,33 +3449,30 @@ Map json) => - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl( - AssistantsResponseFormat.fromJson( - json['value'] as Map), + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl( + ResponseFormat.fromJson(json['value'] as Map), $type: json['runtimeType'] as String?, ); -Map - _$$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImplToJson( - _$CreateThreadAndRunRequestResponseFormatAssistantsResponseFormatImpl - instance) => - { - 'value': instance.value.toJson(), - 'runtimeType': instance.$type, - }; +Map _$$CreateThreadAndRunRequestResponseFormatResponseFormatImplToJson( + _$CreateThreadAndRunRequestResponseFormatResponseFormatImpl instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; _$ThreadObjectImpl _$$ThreadObjectImplFromJson(Map json) => _$ThreadObjectImpl( id: json['id'] as String, object: $enumDecode(_$ThreadObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), toolResources: json['tool_resources'] == null ? 
null : ToolResources.fromJson( @@ -3489,6 +3623,10 @@ _$ToolResourcesFileSearchVectorStoreImpl fileIds: (json['file_ids'] as List?) ?.map((e) => e as String) .toList(), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -3503,6 +3641,7 @@ Map _$$ToolResourcesFileSearchVectorStoreImplToJson( } writeNotNull('file_ids', instance.fileIds); + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -3553,7 +3692,7 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => _$MessageObjectImpl( id: json['id'] as String, object: $enumDecode(_$MessageObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), threadId: json['thread_id'] as String, status: $enumDecodeNullable(_$MessageObjectStatusEnumMap, json['status'], unknownValue: JsonKey.nullForUndefinedEnumValue), @@ -3561,8 +3700,8 @@ _$MessageObjectImpl _$$MessageObjectImplFromJson(Map json) => ? null : MessageObjectIncompleteDetails.fromJson( json['incomplete_details'] as Map), - completedAt: json['completed_at'] as int?, - incompleteAt: json['incomplete_at'] as int?, + completedAt: (json['completed_at'] as num?)?.toInt(), + incompleteAt: (json['incomplete_at'] as num?)?.toInt(), role: $enumDecode(_$MessageRoleEnumMap, json['role']), content: (json['content'] as List) .map((e) => MessageContent.fromJson(e as Map)) @@ -3906,44 +4045,14 @@ _$MessageContentTextAnnotationsFileCitationImpl Map json) => _$MessageContentTextAnnotationsFileCitationImpl( fileId: json['file_id'] as String, - quote: json['quote'] as String, ); Map _$$MessageContentTextAnnotationsFileCitationImplToJson( _$MessageContentTextAnnotationsFileCitationImpl instance) => { 'file_id': instance.fileId, - 'quote': instance.quote, }; -_$MessageDeltaContentImageUrlObjectImpl - _$$MessageDeltaContentImageUrlObjectImplFromJson( - Map json) => - _$MessageDeltaContentImageUrlObjectImpl( - index: json['index'] as int?, - type: json['type'] as String?, - imageUrl: json['image_url'] == null - ? null - : MessageContentImageUrl.fromJson( - json['image_url'] as Map), - ); - -Map _$$MessageDeltaContentImageUrlObjectImplToJson( - _$MessageDeltaContentImageUrlObjectImpl instance) { - final val = {}; - - void writeNotNull(String key, dynamic value) { - if (value != null) { - val[key] = value; - } - } - - writeNotNull('index', instance.index); - writeNotNull('type', instance.type); - writeNotNull('image_url', instance.imageUrl?.toJson()); - return val; -} - _$MessageDeltaContentTextImpl _$$MessageDeltaContentTextImplFromJson( Map json) => _$MessageDeltaContentTextImpl( @@ -3998,7 +4107,7 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => _$RunStepObjectImpl( id: json['id'] as String, object: $enumDecode(_$RunStepObjectObjectEnumMap, json['object']), - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), assistantId: json['assistant_id'] as String, threadId: json['thread_id'] as String, runId: json['run_id'] as String, @@ -4010,10 +4119,10 @@ _$RunStepObjectImpl _$$RunStepObjectImplFromJson(Map json) => ? 
null : RunStepLastError.fromJson( json['last_error'] as Map), - expiredAt: json['expired_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, - failedAt: json['failed_at'] as int?, - completedAt: json['completed_at'] as int?, + expiredAt: (json['expired_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), metadata: json['metadata'] as Map?, usage: json['usage'] == null ? null @@ -4255,12 +4364,115 @@ Map return val; } +_$RunStepDetailsToolCallsFileSearchImpl + _$$RunStepDetailsToolCallsFileSearchImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchImpl( + rankingOptions: json['ranking_options'] == null + ? null + : RunStepDetailsToolCallsFileSearchRankingOptionsObject.fromJson( + json['ranking_options'] as Map), + results: (json['results'] as List?) + ?.map((e) => + RunStepDetailsToolCallsFileSearchResultObject.fromJson( + e as Map)) + .toList(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchImplToJson( + _$RunStepDetailsToolCallsFileSearchImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('ranking_options', instance.rankingOptions?.toJson()); + writeNotNull('results', instance.results?.map((e) => e.toJson()).toList()); + return val; +} + +_$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl + _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl( + ranker: $enumDecode(_$FileSearchRankerEnumMap, json['ranker']), + scoreThreshold: (json['score_threshold'] as num).toDouble(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImplToJson( + _$RunStepDetailsToolCallsFileSearchRankingOptionsObjectImpl instance) => + { + 'ranker': _$FileSearchRankerEnumMap[instance.ranker]!, + 'score_threshold': instance.scoreThreshold, + }; + +_$RunStepDetailsToolCallsFileSearchResultObjectImpl + _$$RunStepDetailsToolCallsFileSearchResultObjectImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultObjectImpl( + fileId: json['file_id'] as String, + fileName: json['file_name'] as String, + score: (json['score'] as num).toDouble(), + content: (json['content'] as List?) + ?.map((e) => + RunStepDetailsToolCallsFileSearchResultContent.fromJson( + e as Map)) + .toList(), + ); + +Map _$$RunStepDetailsToolCallsFileSearchResultObjectImplToJson( + _$RunStepDetailsToolCallsFileSearchResultObjectImpl instance) { + final val = { + 'file_id': instance.fileId, + 'file_name': instance.fileName, + 'score': instance.score, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content?.map((e) => e.toJson()).toList()); + return val; +} + +_$RunStepDetailsToolCallsFileSearchResultContentImpl + _$$RunStepDetailsToolCallsFileSearchResultContentImplFromJson( + Map json) => + _$RunStepDetailsToolCallsFileSearchResultContentImpl( + type: json['type'] as String? ?? 
'text', + text: json['text'] as String?, + ); + +Map + _$$RunStepDetailsToolCallsFileSearchResultContentImplToJson( + _$RunStepDetailsToolCallsFileSearchResultContentImpl instance) { + final val = { + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('text', instance.text); + return val; +} + _$RunStepCompletionUsageImpl _$$RunStepCompletionUsageImplFromJson( Map json) => _$RunStepCompletionUsageImpl( - completionTokens: json['completion_tokens'] as int, - promptTokens: json['prompt_tokens'] as int, - totalTokens: json['total_tokens'] as int, + completionTokens: (json['completion_tokens'] as num).toInt(), + promptTokens: (json['prompt_tokens'] as num).toInt(), + totalTokens: (json['total_tokens'] as num).toInt(), ); Map _$$RunStepCompletionUsageImplToJson( @@ -4276,7 +4488,7 @@ _$VectorStoreExpirationAfterImpl _$$VectorStoreExpirationAfterImplFromJson( _$VectorStoreExpirationAfterImpl( anchor: $enumDecode( _$VectorStoreExpirationAfterAnchorEnumMap, json['anchor']), - days: json['days'] as int, + days: (json['days'] as num).toInt(), ); Map _$$VectorStoreExpirationAfterImplToJson( @@ -4295,9 +4507,9 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( _$VectorStoreObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), name: json['name'] as String?, - usageBytes: json['usage_bytes'] as int, + usageBytes: (json['usage_bytes'] as num).toInt(), fileCounts: VectorStoreObjectFileCounts.fromJson( json['file_counts'] as Map), status: $enumDecode(_$VectorStoreObjectStatusEnumMap, json['status']), @@ -4305,8 +4517,8 @@ _$VectorStoreObjectImpl _$$VectorStoreObjectImplFromJson( ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), - expiresAt: json['expires_at'] as int?, - lastActiveAt: json['last_active_at'] as int?, + expiresAt: (json['expires_at'] as num?)?.toInt(), + lastActiveAt: (json['last_active_at'] as num?)?.toInt(), metadata: json['metadata'], ); @@ -4344,11 +4556,11 @@ const _$VectorStoreObjectStatusEnumMap = { _$VectorStoreObjectFileCountsImpl _$$VectorStoreObjectFileCountsImplFromJson( Map json) => _$VectorStoreObjectFileCountsImpl( - inProgress: json['in_progress'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, - cancelled: json['cancelled'] as int, - total: json['total'] as int, + inProgress: (json['in_progress'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), + cancelled: (json['cancelled'] as num).toInt(), + total: (json['total'] as num).toInt(), ); Map _$$VectorStoreObjectFileCountsImplToJson( @@ -4364,14 +4576,18 @@ Map _$$VectorStoreObjectFileCountsImplToJson( _$CreateVectorStoreRequestImpl _$$CreateVectorStoreRequestImplFromJson( Map json) => _$CreateVectorStoreRequestImpl( + name: json['name'] as String?, fileIds: (json['file_ids'] as List?) ?.map((e) => e as String) .toList(), - name: json['name'] as String, expiresAfter: json['expires_after'] == null ? null : VectorStoreExpirationAfter.fromJson( json['expires_after'] as Map), + chunkingStrategy: json['chunking_strategy'] == null + ? 
null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), metadata: json['metadata'], ); @@ -4385,9 +4601,10 @@ Map _$$CreateVectorStoreRequestImplToJson( } } + writeNotNull('name', instance.name); writeNotNull('file_ids', instance.fileIds); - val['name'] = instance.name; writeNotNull('expires_after', instance.expiresAfter?.toJson()); + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); writeNotNull('metadata', instance.metadata); return val; } @@ -4462,27 +4679,41 @@ _$VectorStoreFileObjectImpl _$$VectorStoreFileObjectImplFromJson( _$VectorStoreFileObjectImpl( id: json['id'] as String, object: json['object'] as String, - usageBytes: json['usage_bytes'] as int, - createdAt: json['created_at'] as int, + usageBytes: (json['usage_bytes'] as num).toInt(), + createdAt: (json['created_at'] as num).toInt(), vectorStoreId: json['vector_store_id'] as String, status: $enumDecode(_$VectorStoreFileStatusEnumMap, json['status']), lastError: json['last_error'] == null ? null : VectorStoreFileObjectLastError.fromJson( json['last_error'] as Map), + chunkingStrategy: json['chunking_strategy'] == null + ? null + : ChunkingStrategyResponseParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$VectorStoreFileObjectImplToJson( - _$VectorStoreFileObjectImpl instance) => - { - 'id': instance.id, - 'object': instance.object, - 'usage_bytes': instance.usageBytes, - 'created_at': instance.createdAt, - 'vector_store_id': instance.vectorStoreId, - 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, - 'last_error': instance.lastError?.toJson(), - }; + _$VectorStoreFileObjectImpl instance) { + final val = { + 'id': instance.id, + 'object': instance.object, + 'usage_bytes': instance.usageBytes, + 'created_at': instance.createdAt, + 'vector_store_id': instance.vectorStoreId, + 'status': _$VectorStoreFileStatusEnumMap[instance.status]!, + 'last_error': instance.lastError?.toJson(), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} const _$VectorStoreFileStatusEnumMap = { VectorStoreFileStatus.inProgress: 'in_progress', @@ -4507,23 +4738,50 @@ Map _$$VectorStoreFileObjectLastErrorImplToJson( }; const _$VectorStoreFileObjectLastErrorCodeEnumMap = { - VectorStoreFileObjectLastErrorCode.internalError: 'internal_error', - VectorStoreFileObjectLastErrorCode.fileNotFound: 'file_not_found', - VectorStoreFileObjectLastErrorCode.parsingError: 'parsing_error', - VectorStoreFileObjectLastErrorCode.unhandledMimeType: 'unhandled_mime_type', + VectorStoreFileObjectLastErrorCode.serverError: 'server_error', + VectorStoreFileObjectLastErrorCode.unsupportedFile: 'unsupported_file', + VectorStoreFileObjectLastErrorCode.invalidFile: 'invalid_file', }; +_$StaticChunkingStrategyImpl _$$StaticChunkingStrategyImplFromJson( + Map json) => + _$StaticChunkingStrategyImpl( + maxChunkSizeTokens: (json['max_chunk_size_tokens'] as num).toInt(), + chunkOverlapTokens: (json['chunk_overlap_tokens'] as num).toInt(), + ); + +Map _$$StaticChunkingStrategyImplToJson( + _$StaticChunkingStrategyImpl instance) => + { + 'max_chunk_size_tokens': instance.maxChunkSizeTokens, + 'chunk_overlap_tokens': instance.chunkOverlapTokens, + }; + _$CreateVectorStoreFileRequestImpl _$$CreateVectorStoreFileRequestImplFromJson( Map json) => _$CreateVectorStoreFileRequestImpl( fileId: json['file_id'] as String, + chunkingStrategy: json['chunking_strategy'] == null + ? 
null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileRequestImplToJson( - _$CreateVectorStoreFileRequestImpl instance) => - { - 'file_id': instance.fileId, - }; + _$CreateVectorStoreFileRequestImpl instance) { + final val = { + 'file_id': instance.fileId, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} _$ListVectorStoreFilesResponseImpl _$$ListVectorStoreFilesResponseImplFromJson( Map json) => @@ -4568,7 +4826,7 @@ _$VectorStoreFileBatchObjectImpl _$$VectorStoreFileBatchObjectImplFromJson( _$VectorStoreFileBatchObjectImpl( id: json['id'] as String, object: json['object'] as String, - createdAt: json['created_at'] as int, + createdAt: (json['created_at'] as num).toInt(), vectorStoreId: json['vector_store_id'] as String, status: $enumDecode( _$VectorStoreFileBatchObjectStatusEnumMap, json['status']), @@ -4598,11 +4856,11 @@ _$VectorStoreFileBatchObjectFileCountsImpl _$$VectorStoreFileBatchObjectFileCountsImplFromJson( Map json) => _$VectorStoreFileBatchObjectFileCountsImpl( - inProgress: json['in_progress'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, - cancelled: json['cancelled'] as int, - total: json['total'] as int, + inProgress: (json['in_progress'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), + cancelled: (json['cancelled'] as num).toInt(), + total: (json['total'] as num).toInt(), ); Map _$$VectorStoreFileBatchObjectFileCountsImplToJson( @@ -4622,13 +4880,27 @@ _$CreateVectorStoreFileBatchRequestImpl fileIds: (json['file_ids'] as List) .map((e) => e as String) .toList(), + chunkingStrategy: json['chunking_strategy'] == null + ? 
null + : ChunkingStrategyRequestParam.fromJson( + json['chunking_strategy'] as Map), ); Map _$$CreateVectorStoreFileBatchRequestImplToJson( - _$CreateVectorStoreFileBatchRequestImpl instance) => - { - 'file_ids': instance.fileIds, - }; + _$CreateVectorStoreFileBatchRequestImpl instance) { + final val = { + 'file_ids': instance.fileIds, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('chunking_strategy', instance.chunkingStrategy?.toJson()); + return val; +} _$ErrorImpl _$$ErrorImplFromJson(Map json) => _$ErrorImpl( code: json['code'] as String?, @@ -4699,15 +4971,15 @@ _$BatchImpl _$$BatchImplFromJson(Map json) => _$BatchImpl( status: $enumDecode(_$BatchStatusEnumMap, json['status']), outputFileId: json['output_file_id'] as String?, errorFileId: json['error_file_id'] as String?, - createdAt: json['created_at'] as int, - inProgressAt: json['in_progress_at'] as int?, - expiresAt: json['expires_at'] as int?, - finalizingAt: json['finalizing_at'] as int?, - completedAt: json['completed_at'] as int?, - failedAt: json['failed_at'] as int?, - expiredAt: json['expired_at'] as int?, - cancellingAt: json['cancelling_at'] as int?, - cancelledAt: json['cancelled_at'] as int?, + createdAt: (json['created_at'] as num).toInt(), + inProgressAt: (json['in_progress_at'] as num?)?.toInt(), + expiresAt: (json['expires_at'] as num?)?.toInt(), + finalizingAt: (json['finalizing_at'] as num?)?.toInt(), + completedAt: (json['completed_at'] as num?)?.toInt(), + failedAt: (json['failed_at'] as num?)?.toInt(), + expiredAt: (json['expired_at'] as num?)?.toInt(), + cancellingAt: (json['cancelling_at'] as num?)?.toInt(), + cancelledAt: (json['cancelled_at'] as num?)?.toInt(), requestCounts: json['request_counts'] == null ? null : BatchRequestCounts.fromJson( @@ -4789,9 +5061,9 @@ Map _$$BatchErrorsImplToJson(_$BatchErrorsImpl instance) { _$BatchRequestCountsImpl _$$BatchRequestCountsImplFromJson( Map json) => _$BatchRequestCountsImpl( - total: json['total'] as int, - completed: json['completed'] as int, - failed: json['failed'] as int, + total: (json['total'] as num).toInt(), + completed: (json['completed'] as num).toInt(), + failed: (json['failed'] as num).toInt(), ); Map _$$BatchRequestCountsImplToJson( @@ -4808,7 +5080,7 @@ _$BatchErrorsDataInnerImpl _$$BatchErrorsDataInnerImplFromJson( code: json['code'] as String?, message: json['message'] as String?, param: json['param'] as String?, - line: json['line'] as int?, + line: (json['line'] as num?)?.toInt(), ); Map _$$BatchErrorsDataInnerImplToJson( @@ -4926,6 +5198,7 @@ _$ChatCompletionAssistantMessageImpl _$ChatCompletionMessageRoleEnumMap, json['role']) ?? ChatCompletionMessageRole.assistant, content: json['content'] as String?, + refusal: json['refusal'] as String?, name: json['name'] as String?, toolCalls: (json['tool_calls'] as List?) 
?.map((e) => ChatCompletionMessageToolCall.fromJson( @@ -4950,6 +5223,7 @@ Map _$$ChatCompletionAssistantMessageImplToJson( } writeNotNull('content', instance.content); + writeNotNull('refusal', instance.refusal); writeNotNull('name', instance.name); writeNotNull( 'tool_calls', instance.toolCalls?.map((e) => e.toJson()).toList()); @@ -5047,6 +5321,7 @@ Map _$$ChatCompletionMessageContentPartTextImplToJson( const _$ChatCompletionMessageContentPartTypeEnumMap = { ChatCompletionMessageContentPartType.text: 'text', ChatCompletionMessageContentPartType.imageUrl: 'image_url', + ChatCompletionMessageContentPartType.refusal: 'refusal', }; _$ChatCompletionMessageContentPartImageImpl @@ -5068,6 +5343,24 @@ Map _$$ChatCompletionMessageContentPartImageImplToJson( 'image_url': instance.imageUrl.toJson(), }; +_$ChatCompletionMessageContentPartRefusalImpl + _$$ChatCompletionMessageContentPartRefusalImplFromJson( + Map json) => + _$ChatCompletionMessageContentPartRefusalImpl( + type: $enumDecodeNullable( + _$ChatCompletionMessageContentPartTypeEnumMap, + json['type']) ?? + ChatCompletionMessageContentPartType.refusal, + refusal: json['refusal'] as String, + ); + +Map _$$ChatCompletionMessageContentPartRefusalImplToJson( + _$ChatCompletionMessageContentPartRefusalImpl instance) => + { + 'type': _$ChatCompletionMessageContentPartTypeEnumMap[instance.type]!, + 'refusal': instance.refusal, + }; + _$ChatCompletionMessageImageUrlImpl _$$ChatCompletionMessageImageUrlImplFromJson(Map json) => _$ChatCompletionMessageImageUrlImpl( @@ -5090,6 +5383,54 @@ const _$ChatCompletionMessageImageDetailEnumMap = { ChatCompletionMessageImageDetail.high: 'high', }; +_$ResponseFormatTextImpl _$$ResponseFormatTextImplFromJson( + Map json) => + _$ResponseFormatTextImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.text, + ); + +Map _$$ResponseFormatTextImplToJson( + _$ResponseFormatTextImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + }; + +const _$ResponseFormatTypeEnumMap = { + ResponseFormatType.text: 'text', + ResponseFormatType.jsonObject: 'json_object', + ResponseFormatType.jsonSchema: 'json_schema', +}; + +_$ResponseFormatJsonObjectImpl _$$ResponseFormatJsonObjectImplFromJson( + Map json) => + _$ResponseFormatJsonObjectImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.jsonObject, + ); + +Map _$$ResponseFormatJsonObjectImplToJson( + _$ResponseFormatJsonObjectImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + }; + +_$ResponseFormatJsonSchemaImpl _$$ResponseFormatJsonSchemaImplFromJson( + Map json) => + _$ResponseFormatJsonSchemaImpl( + type: $enumDecodeNullable(_$ResponseFormatTypeEnumMap, json['type']) ?? + ResponseFormatType.jsonSchema, + jsonSchema: JsonSchemaObject.fromJson( + json['json_schema'] as Map), + ); + +Map _$$ResponseFormatJsonSchemaImplToJson( + _$ResponseFormatJsonSchemaImpl instance) => + { + 'type': _$ResponseFormatTypeEnumMap[instance.type]!, + 'json_schema': instance.jsonSchema.toJson(), + }; + _$AssistantToolsCodeInterpreterImpl _$$AssistantToolsCodeInterpreterImplFromJson(Map json) => _$AssistantToolsCodeInterpreterImpl( @@ -5105,14 +5446,28 @@ Map _$$AssistantToolsCodeInterpreterImplToJson( _$AssistantToolsFileSearchImpl _$$AssistantToolsFileSearchImplFromJson( Map json) => _$AssistantToolsFileSearchImpl( - type: json['type'] as String? ?? 'file_search', + type: json['type'] as String, + fileSearch: json['file_search'] == null + ? 
null + : AssistantToolsFileSearchFileSearch.fromJson( + json['file_search'] as Map), ); Map _$$AssistantToolsFileSearchImplToJson( - _$AssistantToolsFileSearchImpl instance) => - { - 'type': instance.type, - }; + _$AssistantToolsFileSearchImpl instance) { + final val = { + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('file_search', instance.fileSearch?.toJson()); + return val; +} _$AssistantToolsFunctionImpl _$$AssistantToolsFunctionImplFromJson( Map json) => @@ -5129,6 +5484,32 @@ Map _$$AssistantToolsFunctionImplToJson( 'function': instance.function.toJson(), }; +_$AssistantToolsFileSearchFileSearchImpl + _$$AssistantToolsFileSearchFileSearchImplFromJson( + Map json) => + _$AssistantToolsFileSearchFileSearchImpl( + maxNumResults: (json['max_num_results'] as num?)?.toInt(), + rankingOptions: json['ranking_options'] == null + ? null + : FileSearchRankingOptions.fromJson( + json['ranking_options'] as Map), + ); + +Map _$$AssistantToolsFileSearchFileSearchImplToJson( + _$AssistantToolsFileSearchFileSearchImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('max_num_results', instance.maxNumResults); + writeNotNull('ranking_options', instance.rankingOptions?.toJson()); + return val; +} + _$MessageContentImageFileObjectImpl _$$MessageContentImageFileObjectImplFromJson(Map json) => _$MessageContentImageFileObjectImpl( @@ -5173,11 +5554,25 @@ Map _$$MessageContentTextObjectImplToJson( 'text': instance.text.toJson(), }; +_$MessageContentRefusalObjectImpl _$$MessageContentRefusalObjectImplFromJson( + Map json) => + _$MessageContentRefusalObjectImpl( + type: json['type'] as String, + refusal: json['refusal'] as String, + ); + +Map _$$MessageContentRefusalObjectImplToJson( + _$MessageContentRefusalObjectImpl instance) => + { + 'type': instance.type, + 'refusal': instance.refusal, + }; + _$MessageDeltaContentImageFileObjectImpl _$$MessageDeltaContentImageFileObjectImplFromJson( Map json) => _$MessageDeltaContentImageFileObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, imageFile: json['image_file'] == null ? null @@ -5205,7 +5600,7 @@ Map _$$MessageDeltaContentImageFileObjectImplToJson( _$MessageDeltaContentTextObjectImpl _$$MessageDeltaContentTextObjectImplFromJson(Map json) => _$MessageDeltaContentTextObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] == null ? 
null @@ -5230,6 +5625,61 @@ Map _$$MessageDeltaContentTextObjectImplToJson( return val; } +_$MessageDeltaContentRefusalObjectImpl + _$$MessageDeltaContentRefusalObjectImplFromJson( + Map json) => + _$MessageDeltaContentRefusalObjectImpl( + index: (json['index'] as num).toInt(), + type: json['type'] as String, + refusal: json['refusal'] as String?, + ); + +Map _$$MessageDeltaContentRefusalObjectImplToJson( + _$MessageDeltaContentRefusalObjectImpl instance) { + final val = { + 'index': instance.index, + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('refusal', instance.refusal); + return val; +} + +_$MessageDeltaContentImageUrlObjectImpl + _$$MessageDeltaContentImageUrlObjectImplFromJson( + Map json) => + _$MessageDeltaContentImageUrlObjectImpl( + index: (json['index'] as num).toInt(), + type: json['type'] as String, + imageUrl: json['image_url'] == null + ? null + : MessageContentImageUrl.fromJson( + json['image_url'] as Map), + ); + +Map _$$MessageDeltaContentImageUrlObjectImplToJson( + _$MessageDeltaContentImageUrlObjectImpl instance) { + final val = { + 'index': instance.index, + 'type': instance.type, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('image_url', instance.imageUrl?.toJson()); + return val; +} + _$MessageContentTextAnnotationsFileCitationObjectImpl _$$MessageContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => @@ -5238,8 +5688,8 @@ _$MessageContentTextAnnotationsFileCitationObjectImpl text: json['text'] as String, fileCitation: MessageContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: json['start_index'] as int, - endIndex: json['end_index'] as int, + startIndex: (json['start_index'] as num).toInt(), + endIndex: (json['end_index'] as num).toInt(), ); Map @@ -5261,8 +5711,8 @@ _$MessageContentTextAnnotationsFilePathObjectImpl text: json['text'] as String, filePath: MessageContentTextAnnotationsFilePath.fromJson( json['file_path'] as Map), - startIndex: json['start_index'] as int, - endIndex: json['end_index'] as int, + startIndex: (json['start_index'] as num).toInt(), + endIndex: (json['end_index'] as num).toInt(), ); Map _$$MessageContentTextAnnotationsFilePathObjectImplToJson( @@ -5292,15 +5742,15 @@ _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl _$$MessageDeltaContentTextAnnotationsFileCitationObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFileCitationObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] as String?, fileCitation: json['file_citation'] == null ? null : MessageDeltaContentTextAnnotationsFileCitation.fromJson( json['file_citation'] as Map), - startIndex: json['start_index'] as int?, - endIndex: json['end_index'] as int?, + startIndex: (json['start_index'] as num?)?.toInt(), + endIndex: (json['end_index'] as num?)?.toInt(), ); Map @@ -5328,15 +5778,15 @@ _$MessageDeltaContentTextAnnotationsFilePathObjectImpl _$$MessageDeltaContentTextAnnotationsFilePathObjectImplFromJson( Map json) => _$MessageDeltaContentTextAnnotationsFilePathObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, text: json['text'] as String?, filePath: json['file_path'] == null ? 
null : MessageDeltaContentTextAnnotationsFilePathObjectFilePath .fromJson(json['file_path'] as Map), - startIndex: json['start_index'] as int?, - endIndex: json['end_index'] as int?, + startIndex: (json['start_index'] as num?)?.toInt(), + endIndex: (json['end_index'] as num?)?.toInt(), ); Map @@ -5496,7 +5946,8 @@ _$RunStepDetailsToolCallsFileSearchObjectImpl _$RunStepDetailsToolCallsFileSearchObjectImpl( id: json['id'] as String, type: json['type'] as String, - fileSearch: json['file_search'] as Map, + fileSearch: RunStepDetailsToolCallsFileSearch.fromJson( + json['file_search'] as Map), ); Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( @@ -5504,7 +5955,7 @@ Map _$$RunStepDetailsToolCallsFileSearchObjectImplToJson( { 'id': instance.id, 'type': instance.type, - 'file_search': instance.fileSearch, + 'file_search': instance.fileSearch.toJson(), }; _$RunStepDetailsToolCallsFunctionObjectImpl @@ -5545,7 +5996,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, codeInterpreter: json['code_interpreter'] == null @@ -5576,7 +6027,7 @@ _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl _$$RunStepDeltaStepDetailsToolCallsFileSearchObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFileSearchObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, fileSearch: json['file_search'] as Map, @@ -5605,7 +6056,7 @@ _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl _$$RunStepDeltaStepDetailsToolCallsFunctionObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsFunctionObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), id: json['id'] as String?, type: json['type'] as String, function: json['function'] == null @@ -5693,7 +6144,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, logs: json['logs'] as String?, ); @@ -5720,7 +6171,7 @@ _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl _$$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImplFromJson( Map json) => _$RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImpl( - index: json['index'] as int, + index: (json['index'] as num).toInt(), type: json['type'] as String, image: json['image'] == null ? 
null @@ -5746,6 +6197,64 @@ Map return val; } +_$AutoChunkingStrategyRequestParamImpl + _$$AutoChunkingStrategyRequestParamImplFromJson( + Map json) => + _$AutoChunkingStrategyRequestParamImpl( + type: json['type'] as String, + ); + +Map _$$AutoChunkingStrategyRequestParamImplToJson( + _$AutoChunkingStrategyRequestParamImpl instance) => + { + 'type': instance.type, + }; + +_$StaticChunkingStrategyRequestParamImpl + _$$StaticChunkingStrategyRequestParamImplFromJson( + Map json) => + _$StaticChunkingStrategyRequestParamImpl( + type: json['type'] as String, + static: StaticChunkingStrategy.fromJson( + json['static'] as Map), + ); + +Map _$$StaticChunkingStrategyRequestParamImplToJson( + _$StaticChunkingStrategyRequestParamImpl instance) => + { + 'type': instance.type, + 'static': instance.static.toJson(), + }; + +_$StaticChunkingStrategyResponseParamImpl + _$$StaticChunkingStrategyResponseParamImplFromJson( + Map json) => + _$StaticChunkingStrategyResponseParamImpl( + type: json['type'] as String, + static: StaticChunkingStrategy.fromJson( + json['static'] as Map), + ); + +Map _$$StaticChunkingStrategyResponseParamImplToJson( + _$StaticChunkingStrategyResponseParamImpl instance) => + { + 'type': instance.type, + 'static': instance.static.toJson(), + }; + +_$OtherChunkingStrategyResponseParamImpl + _$$OtherChunkingStrategyResponseParamImplFromJson( + Map json) => + _$OtherChunkingStrategyResponseParamImpl( + type: json['type'] as String, + ); + +Map _$$OtherChunkingStrategyResponseParamImplToJson( + _$OtherChunkingStrategyResponseParamImpl instance) => + { + 'type': instance.type, + }; + _$ThreadStreamEventImpl _$$ThreadStreamEventImplFromJson( Map json) => _$ThreadStreamEventImpl( diff --git a/packages/openai_dart/lib/src/generated/schema/service_tier.dart b/packages/openai_dart/lib/src/generated/schema/service_tier.dart new file mode 100644 index 00000000..8a01afc5 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/service_tier.dart @@ -0,0 +1,18 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ServiceTier +// ========================================== + +/// The service tier used for processing the request. This field is only included if the `service_tier` parameter +/// is specified in the request. +enum ServiceTier { + @JsonValue('scale') + scale, + @JsonValue('default') + vDefault, +} diff --git a/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart new file mode 100644 index 00000000..aa67e062 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/static_chunking_strategy.dart @@ -0,0 +1,60 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: StaticChunkingStrategy +// ========================================== + +/// Static chunking strategy +@freezed +class StaticChunkingStrategy with _$StaticChunkingStrategy { + const StaticChunkingStrategy._(); + + /// Factory constructor for StaticChunkingStrategy + const factory StaticChunkingStrategy({ + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + /// maximum value is `4096`. 
+ @JsonKey(name: 'max_chunk_size_tokens') required int maxChunkSizeTokens, + + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + @JsonKey(name: 'chunk_overlap_tokens') required int chunkOverlapTokens, + }) = _StaticChunkingStrategy; + + /// Object construction from a JSON representation + factory StaticChunkingStrategy.fromJson(Map json) => + _$StaticChunkingStrategyFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'max_chunk_size_tokens', + 'chunk_overlap_tokens' + ]; + + /// Validation constants + static const maxChunkSizeTokensMinValue = 100; + static const maxChunkSizeTokensMaxValue = 4096; + + /// Perform validations on the schema property values + String? validateSchema() { + if (maxChunkSizeTokens < maxChunkSizeTokensMinValue) { + return "The value of 'maxChunkSizeTokens' cannot be < $maxChunkSizeTokensMinValue"; + } + if (maxChunkSizeTokens > maxChunkSizeTokensMaxValue) { + return "The value of 'maxChunkSizeTokens' cannot be > $maxChunkSizeTokensMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'max_chunk_size_tokens': maxChunkSizeTokens, + 'chunk_overlap_tokens': chunkOverlapTokens, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/thread_object.dart b/packages/openai_dart/lib/src/generated/schema/thread_object.dart index a5ae0ea8..20f2e014 100644 --- a/packages/openai_dart/lib/src/generated/schema/thread_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/thread_object.dart @@ -27,7 +27,9 @@ class ThreadObject with _$ThreadObject { /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @JsonKey(name: 'tool_resources') required ToolResources? toolResources, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required Map? metadata, }) = _ThreadObject; diff --git a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart index 63247873..cc01299d 100644 --- a/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart +++ b/packages/openai_dart/lib/src/generated/schema/tool_resources_file_search_vector_store.dart @@ -19,6 +19,11 @@ class ToolResourcesFileSearchVectorStore /// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. @JsonKey(name: 'file_ids', includeIfNull: false) List? fileIds, + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. 
+ /// Any of: [AutoChunkingStrategyRequestParam], [StaticChunkingStrategyRequestParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyRequestParam? chunkingStrategy, + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _ToolResourcesFileSearchVectorStore; @@ -29,7 +34,11 @@ class ToolResourcesFileSearchVectorStore _$ToolResourcesFileSearchVectorStoreFromJson(json); /// List of all property names of schema - static const List propertyNames = ['file_ids', 'metadata']; + static const List propertyNames = [ + 'file_ids', + 'chunking_strategy', + 'metadata' + ]; /// Perform validations on the schema property values String? validateSchema() { @@ -40,6 +49,7 @@ class ToolResourcesFileSearchVectorStore Map toMap() { return { 'file_ids': fileIds, + 'chunking_strategy': chunkingStrategy, 'metadata': metadata, }; } diff --git a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart index 7105bd0c..d2ef2414 100644 --- a/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/update_vector_store_request.dart @@ -22,7 +22,9 @@ class UpdateVectorStoreRequest with _$UpdateVectorStoreRequest { @JsonKey(name: 'expires_after', includeIfNull: false) VectorStoreExpirationAfter? expiresAfter, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) dynamic metadata, }) = _UpdateVectorStoreRequest; diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart index 53e6f928..3664758b 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_file_object.dart @@ -36,6 +36,11 @@ class VectorStoreFileObject with _$VectorStoreFileObject { /// The last error associated with this vector store file. Will be `null` if there are no errors. @JsonKey(name: 'last_error') required VectorStoreFileObjectLastError? lastError, + + /// The chunking strategy used to chunk the file(s). + /// Any of: [StaticChunkingStrategyResponseParam], [OtherChunkingStrategyResponseParam] + @JsonKey(name: 'chunking_strategy', includeIfNull: false) + ChunkingStrategyResponseParam? 
chunkingStrategy, }) = _VectorStoreFileObject; /// Object construction from a JSON representation @@ -50,7 +55,8 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'created_at', 'vector_store_id', 'status', - 'last_error' + 'last_error', + 'chunking_strategy' ]; /// Perform validations on the schema property values @@ -68,6 +74,7 @@ class VectorStoreFileObject with _$VectorStoreFileObject { 'vector_store_id': vectorStoreId, 'status': status, 'last_error': lastError, + 'chunking_strategy': chunkingStrategy, }; } } @@ -133,12 +140,10 @@ class VectorStoreFileObjectLastError with _$VectorStoreFileObjectLastError { /// One of `server_error` or `rate_limit_exceeded`. enum VectorStoreFileObjectLastErrorCode { - @JsonValue('internal_error') - internalError, - @JsonValue('file_not_found') - fileNotFound, - @JsonValue('parsing_error') - parsingError, - @JsonValue('unhandled_mime_type') - unhandledMimeType, + @JsonValue('server_error') + serverError, + @JsonValue('unsupported_file') + unsupportedFile, + @JsonValue('invalid_file') + invalidFile, } diff --git a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart index 836d8337..a3d49591 100644 --- a/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/vector_store_object.dart @@ -47,7 +47,9 @@ class VectorStoreObject with _$VectorStoreObject { /// The Unix timestamp (in seconds) for when the vector store was last active. @JsonKey(name: 'last_active_at') required int? lastActiveAt, - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + /// information about the object in a structured format. Keys can be a maximum of 64 characters long and values + /// can be a maxium of 512 characters long. required dynamic metadata, }) = _VectorStoreObject; diff --git a/packages/openai_dart/lib/src/http_client/http_client.dart b/packages/openai_dart/lib/src/http_client/http_client.dart index 99555ca4..0ad0b2fc 100644 --- a/packages/openai_dart/lib/src/http_client/http_client.dart +++ b/packages/openai_dart/lib/src/http_client/http_client.dart @@ -1,4 +1,3 @@ export 'http_client_stub.dart' if (dart.library.io) 'http_client_io.dart' - if (dart.library.js) 'http_client_html.dart' - if (dart.library.html) 'http_client_html.dart'; + if (dart.library.js_interop) 'http_client_html.dart'; diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index 1f2fe406..ab870afb 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -1,3 +1,4 @@ +// ignore_for_file: avoid_print import 'dart:io'; import 'package:openapi_spec/openapi_spec.dart'; @@ -18,10 +19,12 @@ void main() async { enabled: true, ), ); - await Process.run( + final res = await Process.run( 'dart', ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], ); + print(res.stdout); + print(res.stderr); } String? _onSchemaName(final String schemaName) => switch (schemaName) { @@ -46,11 +49,15 @@ String? 
_onSchemaUnionFactoryName( 'ChatCompletionMessageContentParts' => 'parts', 'ChatCompletionMessageContentPartText' => 'text', 'ChatCompletionMessageContentPartImage' => 'image', + 'ChatCompletionMessageContentPartRefusal' => 'refusal', 'ChatCompletionToolChoiceOptionEnumeration' => 'mode', 'ChatCompletionToolChoiceOptionChatCompletionNamedToolChoice' => 'tool', 'ChatCompletionFunctionCallEnumeration' => 'mode', 'ChatCompletionFunctionCallChatCompletionFunctionCallOption' => 'function', + 'ResponseFormatText' => 'text', + 'ResponseFormatJsonObject' => 'jsonObject', + 'ResponseFormatJsonSchema' => 'jsonSchema', // Completion 'CompletionModelEnumeration' => 'model', 'CompletionModelString' => 'modelId', @@ -77,53 +84,59 @@ String? _onSchemaUnionFactoryName( // Assistant 'AssistantModelEnumeration' => 'model', 'AssistantModelString' => 'modelId', + 'AssistantObjectResponseFormatEnumeration' => 'mode', + 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', + 'CreateMessageRequestContentListMessageContent' => 'parts', + 'CreateMessageRequestContentString' => 'text', + 'CreateRunRequestModelEnumeration' => 'model', + 'CreateRunRequestModelString' => 'modelId', + 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', + 'CreateRunRequestResponseFormatEnumeration' => 'mode', + 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateRunRequestToolChoiceEnumeration' => 'mode', + 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', + 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', + 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', 'MessageContentImageFileObject' => 'imageFile', - 'MessageDeltaContentImageFileObject' => 'imageFile', - 'MessageContentTextObject' => 'text', - 'MessageDeltaContentTextObject' => 'text', 'MessageContentImageUrlObject' => 'imageUrl', 'MessageContentTextAnnotationsFileCitationObject' => 'fileCitation', - 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageContentTextAnnotationsFilePathObject' => 'filePath', + 'MessageContentTextObject' => 'text', + 'MessageContentRefusalObject' => 'refusal', + 'MessageDeltaContentImageFileObject' => 'imageFile', + 'MessageDeltaContentRefusalObject' => 'refusal', + 'MessageDeltaContentImageUrlObject' => 'imageUrl', + 'MessageDeltaContentTextAnnotationsFileCitationObject' => 'fileCitation', 'MessageDeltaContentTextAnnotationsFilePathObject' => 'filePath', + 'MessageDeltaContentTextObject' => 'text', + 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => + 'format', + 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', 'RunModelEnumeration' => 'model', 'RunModelString' => 'modelId', - 'ThreadAndRunModelEnumeration' => 'model', - 'ThreadAndRunModelString' => 'modelId', - 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', + 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', + 'RunObjectResponseFormatEnumeration' => 'mode', + 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', + 'RunObjectToolChoiceEnumeration' => 'mode', + 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', 'RunStepDeltaStepDetailsToolCallsCodeObject' => 'codeInterpreter', - 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 
'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', 'RunStepDeltaStepDetailsToolCallsFileSearchObject' => 'fileSearch', - 'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDeltaStepDetailsToolCallsFunctionObject' => 'function', - 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' => 'logs', - 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', - 'RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', 'RunStepDetailsMessageCreationObject' => 'messageCreation', - 'RunStepDeltaStepDetailsMessageCreationObject' => 'messageCreation', + 'RunStepDetailsToolCallsCodeObject' => 'codeInterpreter', + 'RunStepDetailsToolCallsCodeOutputImageObject' => 'image', + 'RunStepDetailsToolCallsCodeOutputLogsObject' => 'logs', + 'RunStepDetailsToolCallsFileSearchObject' => 'fileSearch', + 'RunStepDetailsToolCallsFunctionObject' => 'function', 'RunStepDetailsToolCallsObject' => 'toolCalls', - 'RunStepDeltaStepDetailsToolCallsObject' => 'toolCalls', - 'CreateRunRequestResponseFormatEnumeration' => 'mode', - 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', - 'RunObjectResponseFormatEnumeration' => 'mode', - 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', - 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', - 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', - 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', - 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => - 'format', - 'CreateRunRequestToolChoiceEnumeration' => 'mode', - 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', - 'RunObjectToolChoiceEnumeration' => 'mode', - 'CreateRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateThreadAndRunRequestToolChoiceAssistantsNamedToolChoice' => 'tool', - 'RunObjectToolChoiceAssistantsNamedToolChoice' => 'tool', - 'CreateMessageRequestContentString' => 'text', - 'CreateMessageRequestContentListMessageContent' => 'parts', + 'ThreadAndRunModelEnumeration' => 'model', + 'ThreadAndRunModelString' => 'modelId', _ => null, }; diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 9490261d..b7333f2c 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -4,7 +4,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.0.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -310,7 +310,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -330,7 +330,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -796,6 +796,16 @@ paths: schema: type: string description: The ID of the thread to run. 
+ - name: include + in: query + description: &include_param_description | + A list of additional fields to include in the response. Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + schema: + type: string requestBody: required: true content: @@ -968,6 +978,11 @@ paths: description: *pagination_before_param_description schema: type: string + - name: include + in: query + description: *include_param_description + schema: + type: string responses: "200": description: OK @@ -1000,6 +1015,11 @@ paths: schema: type: string description: The ID of the run step to retrieve. + - name: include + in: query + description: *include_param_description + schema: + type: string responses: "200": description: OK @@ -1027,7 +1047,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1153,7 +1173,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1169,7 +1189,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -1371,7 +1391,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -1387,7 +1407,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -1465,7 +1485,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. + summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. parameters: - in: path name: batch_id @@ -1795,8 +1815,10 @@ components: properties: model: title: ChatCompletionModel - description: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4-turbo" + description: | + ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + table for details on which models work with the Chat API. + example: "gpt-4o" anyOf: - type: string description: The ID of the model to use for this request. @@ -1806,6 +1828,7 @@ components: Available completion models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -1820,6 +1843,9 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -1827,9 +1853,15 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + description: | + A list of messages comprising the conversation so far. + [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). type: array minItems: 1 items: @@ -1850,22 +1882,39 @@ components: description: | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias + value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to + sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase + likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the + relevant token. logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + description: | + Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of + each output token returned in the `content` of `message`. type: boolean nullable: true top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + description: | + An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, + each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 maximum: 20 nullable: true max_tokens: description: | - The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat + completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated + via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). 
+ type: integer + nullable: true + max_completion_tokens: + description: | + An upper bound for the number of tokens that can be generated for a completion, including visible output + tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). type: integer nullable: true n: @@ -1875,7 +1924,9 @@ components: default: 1 example: 1 nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: | + How many chat completion choices to generate for each input message. Note that you will be charged based on + the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 @@ -1884,21 +1935,7 @@ components: nullable: true description: *completions_presence_penalty_description response_format: - title: ChatCompletionResponseFormat - type: object - description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: [ "text", "json_object" ] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + $ref: "#/components/schemas/ResponseFormat" seed: type: integer # minimum: -9223372036854775808 # The value can't be represented exactly in JavaScript @@ -1906,8 +1943,27 @@ components: nullable: true description: | This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + If specified, our system will make a best effort to sample deterministically, such that repeated requests + with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to + monitor changes in the backend. + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers + subscribed to the scale tier service: + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits + until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the + default service tier with a lower uptime SLA and no latency guarantee. + - If set to 'default', the request will be processed using the default service tier with a lower uptime + SLA and no latency guarantee. 
+ - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null stop: title: ChatCompletionStop description: | @@ -1925,8 +1981,10 @@ components: type: string stream: description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false @@ -1950,9 +2008,10 @@ components: description: *completions_top_p_description tools: type: array - description: > + description: | A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are + supported. items: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: @@ -1962,8 +2021,9 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the + model to call that tool. + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -1972,8 +2032,15 @@ components: `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + parallel_tool_calls: ¶llel_tool_calls + description: | + Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + during tool use. + type: boolean + default: null + nullable: true user: *end_user_param_configuration function_call: title: ChatCompletionFunctionCall @@ -1984,7 +2051,8 @@ components: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. 
+ Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that + function. `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: @@ -2045,13 +2113,12 @@ components: default: user description: The role of the messages author, in this case `user`. content: - # TODO extract to ChatCompletionMessageContent once generator bug fixed description: The contents of the user message. oneOf: - type: string - description: The text contents of the message. + description: The text contents of the user message. - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model. + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. items: $ref: "#/components/schemas/ChatCompletionMessageContentPart" minItems: 1 @@ -2073,6 +2140,10 @@ components: type: string description: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. name: type: string description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. @@ -2124,11 +2195,12 @@ components: oneOf: - $ref: "#/components/schemas/ChatCompletionMessageContentPartText" - $ref: "#/components/schemas/ChatCompletionMessageContentPartImage" + - $ref: "#/components/schemas/ChatCompletionMessageContentPartRefusal" discriminator: propertyName: type ChatCompletionMessageContentPartText: type: object - description: A text content part of a user message. + description: A text content part of a message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2141,7 +2213,7 @@ components: - text ChatCompletionMessageContentPartImage: type: object - title: Image content part + description: An image content part of a user message. properties: type: $ref: "#/components/schemas/ChatCompletionMessageContentPartType" @@ -2166,9 +2238,25 @@ components: - url required: - image_url + ChatCompletionMessageContentPartRefusal: + type: object + description: A refusal content part of a message. + properties: + type: + $ref: "#/components/schemas/ChatCompletionMessageContentPartType" + default: refusal + description: The type of the content part, in this case `refusal`. + refusal: + type: string + description: The refusal message generated by the model. + required: + - refusal ChatCompletionMessageContentPartType: type: string - enum: [ "text", "image_url" ] + enum: + - text + - image_url + - refusal description: The type of the content part. ChatCompletionMessageRole: type: string @@ -2202,18 +2290,120 @@ components: properties: name: type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + description: | + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a + maximum length of 64. 
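
As a quick illustration of the chat-completion request additions in this part of the diff (`max_completion_tokens`, `service_tier`, and the new assistant `refusal` field), here is a minimal sketch in the style of the Python SDK examples used elsewhere in this spec. The model name is a placeholder and the assumption is a recent SDK version that already exposes these fields; nothing below is part of the spec itself.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

completion = client.chat.completions.create(
    model="gpt-4o-mini",        # placeholder model name
    messages=[{"role": "user", "content": "Hello!"}],
    max_completion_tokens=256,  # upper bound covering visible output plus reasoning tokens
    service_tier="auto",        # 'auto' or 'default', per the new enum
)

message = completion.choices[0].message
if getattr(message, "refusal", None):  # new optional refusal field on assistant messages
    print("Model refused:", message.refusal)
else:
    print(message.content)

# When service_tier was requested, the response echoes the tier actually used.
print("service_tier:", completion.service_tier)
```
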
description: type: string - description: A description of what the function does, used by the model to choose when and how to call the function. + description: | + A description of what the function does, used by the model to choose when and how to call the function. parameters: $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: | + Whether to enable strict schema adherence when generating the function call. If set to true, the model will + follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when + `strict` is `true`. Learn more about Structured Outputs in the + [function calling guide](](https://platform.openai.com/docs/guides/function-calling). required: - name FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true + ResponseFormat: + type: object + description: | + An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer + than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model + will match your supplied JSON schema. + Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is + valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system + or user message. Without this, the model may generate an unending stream of whitespace until the generation + reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message + content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded + `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + discriminator: + propertyName: type + ResponseFormatType: + type: string + enum: + - text + - json_object + - json_schema + description: The type of response format being defined. + ResponseFormatText: + type: object + description: "The model should respond with plain text." 
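
The `ResponseFormat` union introduced above replaces the old inline `response_format` object and adds the `json_schema` variant for Structured Outputs. A hedged sketch of supplying such a schema through the Python SDK follows; the schema name and fields are illustrative only and are not taken from the spec.

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",  # placeholder; a model that supports Structured Outputs
    messages=[
        {"role": "system", "content": "Extract the event details."},
        {"role": "user", "content": "Alice and Bob meet on Friday at 10am."},
    ],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "event",  # required by JsonSchemaObject
            "schema": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "day": {"type": "string"},
                    "participants": {"type": "array", "items": {"type": "string"}},
                },
                "required": ["name", "day", "participants"],
                "additionalProperties": False,
            },
            "strict": True,  # enforce exact adherence to the supplied schema
        },
    },
)

print(completion.choices[0].message.content)  # a JSON string matching the schema
```
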
+ properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "text" + ResponseFormatJsonObject: + type: object + description: "The model should respond with a JSON object." + properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "json_object" + ResponseFormatJsonSchema: + type: object + description: "The model should respond with a JSON object that adheres to the specified schema." + properties: + type: + $ref: "#/components/schemas/ResponseFormatType" + default: "json_schema" + json_schema: + $ref: "#/components/schemas/JsonSchemaObject" + required: + - json_schema + JsonSchemaObject: + type: object + description: "A JSON Schema object." + properties: + name: + type: string + description: | + The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + length of 64. + description: + type: string + description: | + A description of what the response format is for, used by the model to determine how to respond in the + format. + schema: + type: object + description: | + The schema for the response format, described as a JSON Schema object. + additionalProperties: true + strict: + type: boolean + nullable: true + default: false + description: | + Whether to enable strict schema adherence when generating the output. If set to true, the model will always + follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when + `strict` is `true`. To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + required: + - name + - schema ChatCompletionTool: type: object description: A tool the model may use. @@ -2290,6 +2480,8 @@ components: model: type: string description: The model used for the chat completion. + service_tier: + $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | @@ -2342,6 +2534,14 @@ components: "content_filter", "function_call", ] + ServiceTier: + description: | + The service tier used for processing the request. This field is only included if the `service_tier` parameter + is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" + nullable: true ChatCompletionLogprobs: &chat_completion_response_logprobs description: Log probability information for the choice. type: object @@ -2353,8 +2553,12 @@ components: items: $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - required: - - content + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true ChatCompletionTokenLogprob: type: object description: Log probability information for a token. @@ -2412,6 +2616,8 @@ components: model: type: string description: The model to generate the completion. 
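
The `strict` flag added to `FunctionObject` (surfaced through `ChatCompletionTool`) pairs naturally with the request-level `parallel_tool_calls` toggle shown earlier in this diff. A rough sketch, assuming a recent Python SDK; the `get_weather` function is hypothetical.

```python
from openai import OpenAI

client = OpenAI()

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool for illustration
            "description": "Get the current weather for a city.",
            "strict": True,  # new flag: generated arguments must match the schema exactly
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
        },
    }
]

completion = client.chat.completions.create(
    model="gpt-4o",  # placeholder model name
    messages=[{"role": "user", "content": "What's the weather like in Boston today?"}],
    tools=tools,
    tool_choice="auto",
    parallel_tool_calls=False,  # opt out of parallel function calling
)

print(completion.choices[0].message.tool_calls)
```
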
+ service_tier: + $ref: "#/components/schemas/ServiceTier" system_fingerprint: type: string description: | @@ -2426,10 +2632,10 @@ components: $ref: "#/components/schemas/CompletionUsage" required: - choices - - created + # - created # Made nullable to support FastChat API which doesn't return this field with some models # - id # Made nullable to support OpenRouter API which doesn't return this field with some models # - model # Made nullable to support TogetherAI API which doesn't return this field with some models - - object + # - object # Made nullable to support FastChat API which doesn't return this field with some models ChatCompletionStreamResponseChoice: type: object description: A choice the model generated for the input prompt. @@ -2455,6 +2661,10 @@ components: type: string description: The contents of the chunk message. nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true function_call: $ref: "#/components/schemas/ChatCompletionStreamMessageFunctionCall" tool_calls: @@ -2504,10 +2714,19 @@ components: total_tokens: type: integer description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + $ref: "#/components/schemas/CompletionTokensDetails" required: - prompt_tokens - completion_tokens - total_tokens + CompletionTokensDetails: + type: object + description: Breakdown of tokens used in a completion. + properties: + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. CreateEmbeddingRequest: type: object description: Request object for the Create embedding endpoint. @@ -2653,7 +2872,7 @@ components: description: | The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" + example: "gpt-4o-mini" anyOf: - type: string description: The ID of the model to use for this request. @@ -2661,15 +2880,20 @@ components: title: FineTuningModels description: | Available fine-tuning models. Mind that the list may not be exhaustive nor up-to-date. - enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] training_file: description: | The ID of an uploaded file that contains training data. See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose + `fine-tune`. + The contents of the file should differ depending on if the model uses the + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -2677,12 +2901,12 @@ components: $ref: "#/components/schemas/FineTuningJobHyperparameters" suffix: description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + A string of up to 64 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
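
The new `completion_tokens_details.reasoning_tokens` breakdown is mainly relevant to the o1-family models added later in this diff. A minimal sketch of reading it is below; the model name and token budget are placeholders, and the field may be absent on older SDK versions or non-reasoning models.

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="o1-preview",  # placeholder reasoning model
    messages=[{"role": "user", "content": "Is 2027 a prime number? Explain briefly."}],
    max_completion_tokens=2000,  # bounds visible output plus reasoning tokens
)

usage = completion.usage
details = getattr(usage, "completion_tokens_details", None)  # new usage breakdown, may be absent
if details is not None:
    print("reasoning tokens:", details.reasoning_tokens)
print("completion tokens:", usage.completion_tokens)
```
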
+ For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string minLength: 1 - maxLength: 40 + maxLength: 64 default: null nullable: true validation_file: @@ -2808,7 +3032,7 @@ components: type: string description: | The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - enum: ["wandb"] + enum: [ "wandb" ] wandb: id: FineTuningIntegrationWandB type: object @@ -2881,8 +3105,10 @@ components: n_epochs: title: FineTuningNEpochs description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number + manually, we support any number between 1 and 50 epochs. oneOf: - type: string title: FineTuningNEpochsOptions @@ -2943,7 +3169,7 @@ components: object: type: string description: The object type, which is always "list". - enum: [list] + enum: [ list ] first_id: type: string description: The ID of the first checkpoint in the list. @@ -3036,7 +3262,7 @@ components: object: type: string description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + enum: [ fine_tuning.job.checkpoint ] required: - created_at - fine_tuning_job_id @@ -3396,7 +3622,8 @@ components: nullable: true tools: description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of + types `code_interpreter`, `file_search`, or `function`. default: [ ] type: array maxItems: 128 @@ -3406,13 +3633,16 @@ components: $ref: "#/components/schemas/ToolResources" metadata: description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional + information about the object in a structured format. Keys can be a maximum of 64 characters long and values + can be a maxium of 512 characters long. type: object additionalProperties: true nullable: true temperature: description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 @@ -3427,23 +3657,38 @@ components: example: 1 nullable: true description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
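
For the fine-tuning changes above (gpt-4o-mini joining the fine-tunable models, the 64-character `suffix` limit, and `"auto"` epoch selection), a hedged sketch of a job-creation call follows; the training file ID is a placeholder for a JSONL file uploaded with purpose `fine-tune`.

```python
from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",                   # now listed among the fine-tunable models
    training_file="file-abc123",           # placeholder file ID
    suffix="customer-support",             # up to 64 characters under the updated limit
    hyperparameters={"n_epochs": "auto"},  # "auto" lets the service pick 1-50 epochs for the dataset
)

print(job.id, job.status)
```
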
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results + of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability + mass are considered. We generally recommend altering this or temperature but not both. response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + description: &assistant_response_format | + Specifies the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures + the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates + is valid JSON. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a + system or user message. Without this, the model may generate an unending stream of whitespace until the + generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note + that the message content may be partially cut off if `finish_reason="length"`, which indicates the + generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string title: AssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - id - object @@ -3472,6 +3717,7 @@ components: Available assistant models. Mind that the list may not be exhaustive nor up-to-date. enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -3486,6 +3732,9 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -3493,6 +3742,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] name: description: *assistant_name_param_description @@ -3524,8 +3777,7 @@ components: additionalProperties: true nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description type: number minimum: 0 maximum: 2 @@ -3539,24 +3791,17 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateAssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - model ModifyAssistantRequest: @@ -3619,24 +3864,17 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+ description: *assistant_response_format oneOf: - type: string title: ModifyAssistantResponseFormatMode description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" DeleteAssistantResponse: type: object description: Represents a deleted response returned by the Delete assistant endpoint. @@ -3710,7 +3948,48 @@ components: type: type: string description: "The type of tool being defined: `file_search`" - default: "file_search" + default: file_search + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models + and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" + required: + - type + FileSearchRankingOptions: + type: object + description: | + The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and + a score_threshold of 0. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + for more information. + properties: + ranker: + $ref: "#/components/schemas/FileSearchRanker" + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - score_threshold + FileSearchRanker: + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] AssistantToolsFunction: type: object description: Function tool @@ -3730,7 +4009,7 @@ components: type: type: string title: AssistantsToolType - enum: ["function", "code_interpreter", "file_search"] + enum: [ "function", "code_interpreter", "file_search" ] description: The type of the tool. If type is `function`, the function name must be set function: $ref: "#/components/schemas/AssistantsFunctionCallOption" @@ -3744,18 +4023,6 @@ components: description: The name of the function to call. required: - name - AssistantsResponseFormat: - type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - title: AssistantsResponseFormatType - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. TruncationObject: type: object description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. @@ -3764,7 +4031,7 @@ components: type: string name: TruncationStrategy description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. 
When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] + enum: [ "auto", "last_messages" ] last_messages: type: integer description: The number of most recent messages from the thread when constructing the context for the run. @@ -3842,7 +4109,7 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] + enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -3877,7 +4144,7 @@ components: reason: description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] + enum: [ "max_completion_tokens", "max_prompt_tokens" ] model: description: The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. type: string @@ -3936,22 +4203,19 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: RunObjectResponseFormatMode description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" required: - id - object @@ -3976,6 +4240,7 @@ components: - max_completion_tokens - truncation_strategy - tool_choice + - parallel_tool_calls - response_format RunCompletionUsage: type: object @@ -4016,6 +4281,7 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4030,6 +4296,9 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4037,6 +4306,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] instructions: description: Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -4079,10 +4352,7 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. + description: *run_top_p_description max_prompt_tokens: type: integer nullable: true @@ -4113,22 +4383,19 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateRunRequestResponseFormatMode description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" stream: type: boolean nullable: true @@ -4257,6 +4524,7 @@ components: Available models. Mind that the list may not be exhaustive nor up-to-date. 
enum: [ + "chatgpt-4o-latest", "gpt-4", "gpt-4-32k", "gpt-4-32k-0314", @@ -4271,6 +4539,9 @@ components: "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", @@ -4278,6 +4549,10 @@ components: "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "o1-mini", + "o1-mini-2024-09-12", + "o1-preview", + "o1-preview-2024-09-12", ] instructions: description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -4343,22 +4618,19 @@ components: `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] + enum: [ none, auto, required ] - $ref: "#/components/schemas/AssistantsNamedToolChoice" + parallel_tool_calls: *parallel_tool_calls response_format: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + description: *assistant_response_format oneOf: - type: string title: CreateThreadAndRunRequestResponseFormatMode description: > `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsResponseFormat" + enum: [ auto ] + default: auto + - $ref: "#/components/schemas/ResponseFormat" stream: type: boolean nullable: true @@ -4436,7 +4708,7 @@ components: type: array description: | A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -4470,6 +4742,8 @@ components: maxItems: 10000 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: type: object description: | @@ -4544,7 +4818,7 @@ components: description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. type: string nullable: true - enum: ["in_progress", "incomplete", "completed"] + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: id: MessageIncompleteDetails type: object @@ -4641,7 +4915,7 @@ components: object: description: The object type, which is always `thread.message.delta`. 
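
The run-level `parallel_tool_calls` flag and the `auto`/`ResponseFormat` union also apply to the Assistants run endpoints covered above. A sketch using the beta Assistants surface of the Python SDK; the assistant ID is a placeholder and the exact beta method names are an assumption rather than something stated in this diff.

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",  # placeholder assistant ID
    thread={
        "messages": [
            {"role": "user", "content": "Summarise the uploaded report."}
        ]
    },
    parallel_tool_calls=False,   # new per-run toggle
    response_format="auto",      # or a ResponseFormat object such as {"type": "json_object"}
    truncation_strategy={"type": "last_messages", "last_messages": 10},
)

print(run.id, run.status)
```
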
type: string - enum: ["thread.message.delta"] + enum: [ "thread.message.delta" ] delta: $ref: "#/components/schemas/MessageDelta" required: @@ -4666,6 +4940,7 @@ components: - $ref: "#/components/schemas/MessageContentImageFileObject" - $ref: "#/components/schemas/MessageContentImageUrlObject" - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" discriminator: propertyName: type MessageDeltaContent: @@ -4674,6 +4949,8 @@ components: oneOf: - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" discriminator: propertyName: type CreateMessageRequest: @@ -4885,12 +5162,8 @@ components: file_id: description: The ID of the specific File the citation is from. type: string - quote: - description: The specific quote in the file. - type: string required: - file_id - - quote MessageContentTextAnnotationsFilePathObject: type: object description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. @@ -4922,6 +5195,20 @@ components: - file_path - start_index - end_index + MessageContentRefusalObject: + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + default: refusal + refusal: + type: string + nullable: false + required: + - type + - refusal MessageDeltaContentImageFileObject: type: object description: References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. @@ -4961,6 +5248,9 @@ components: type: string image_url: $ref: "#/components/schemas/MessageContentImageUrl" + required: + - index + - type MessageDeltaContentTextObject: type: object description: The text content that is part of a message. @@ -5062,6 +5352,23 @@ components: required: - index - type + MessageDeltaContentRefusalObject: + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + type: string + description: Always `refusal`. + default: refusal + refusal: + type: string + description: The refusal content generated by the assistant. + required: + - index + - type RunStepObject: type: object description: | @@ -5166,7 +5473,7 @@ components: object: description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.run.step.delta"] + enum: [ "thread.run.step.delta" ] delta: $ref: "#/components/schemas/RunStepDelta" required: @@ -5463,13 +5770,74 @@ components: type: string description: The type of tool call. This is always going to be `file_search` for this type of tool call. file_search: - type: object - description: For now, this is always going to be an empty object. - additionalProperties: true + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearch" required: - id - type - file_search + RunStepDetailsToolCallsFileSearch: + type: object + description: The definition of the file search that was called. + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: + type: array + description: The results of the file search. 
+ items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + type: object + description: The ranking options for the file search. + properties: + ranker: + $ref: "#/components/schemas/FileSearchRanker" + score_threshold: + type: number + description: | + The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + RunStepDetailsToolCallsFileSearchResultObject: + type: object + description: A result instance of the file search. + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: | + The content of the result that was found. The content is only included if requested via the include + query parameter. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultContent" + required: + - file_id + - file_name + - score + RunStepDetailsToolCallsFileSearchResultContent: + type: object + description: The content of the result that was found. + properties: + type: + type: string + description: The type of the content. + default: text + text: + type: string + description: The text content of the file. RunStepDeltaStepDetailsToolCallsFileSearchObject: type: object description: File search tool call @@ -5580,7 +5948,7 @@ components: anchor: description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." type: string - enum: ["last_active_at"] + enum: [ "last_active_at" ] days: description: The number of days after the anchor time that the vector store will expire. type: integer @@ -5639,7 +6007,7 @@ components: description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. type: string name: VectorStoreStatus - enum: ["expired", "in_progress", "completed"] + enum: [ "expired", "in_progress", "completed" ] expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" expires_at: @@ -5669,23 +6037,23 @@ components: description: Request object for the Create assistant file endpoint. additionalProperties: false properties: + name: + description: The name of the vector store. + type: string file_ids: description: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. type: array maxItems: 500 items: type: string - name: - description: The name of the vector store. - type: string expires_after: $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" metadata: description: *metadata_description type: object nullable: true - required: - - name UpdateVectorStoreRequest: type: object description: Request object for the Update vector store endpoint. @@ -5774,7 +6142,7 @@ components: description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
type: string title: VectorStoreFileStatus - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] last_error: id: VectorStoreFileLastError type: object @@ -5786,10 +6154,9 @@ components: description: One of `server_error` or `rate_limit_exceeded`. enum: [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", + "server_error", + "unsupported_file", + "invalid_file", ] message: type: string @@ -5797,6 +6164,8 @@ components: required: - code - message + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyResponseParam" required: - id - object @@ -5805,6 +6174,97 @@ components: - vector_store_id - status - last_error + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + discriminator: + propertyName: type + AutoChunkingStrategyRequestParam: + type: object + description: | + Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` + and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + default: auto + required: + - type + StaticChunkingStrategyRequestParam: + type: object + description: Static chunking strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + default: static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + StaticChunkingStrategy: + type: object + description: Static chunking strategy + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: | + The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + ChunkingStrategyResponseParam: + type: object + description: The chunking strategy used to chunk the file(s). + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + discriminator: + propertyName: type + OtherChunkingStrategyResponseParam: + type: object + description: | + Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because + the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + default: other + required: + - type + StaticChunkingStrategyResponseParam: + type: object + description: Static Chunking Strategy. + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + default: static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static CreateVectorStoreFileRequest: type: object description: Request object for the Create vector store file endpoint. 
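
The chunking-strategy schemas above (auto by default, or a static strategy with `max_chunk_size_tokens` and `chunk_overlap_tokens`) can be passed when creating a vector store or attaching files to one. A hedged sketch via the beta vector-store API of the Python SDK; the store name and file ID are placeholders.

```python
from openai import OpenAI

client = OpenAI()

vector_store = client.beta.vector_stores.create(
    name="product-docs",       # name is no longer required, but still useful
    file_ids=["file-abc123"],  # placeholder file ID
    chunking_strategy={
        "type": "static",
        "static": {
            "max_chunk_size_tokens": 800,  # 100-4096; 800 mirrors the auto default
            "chunk_overlap_tokens": 400,   # must not exceed half of max_chunk_size_tokens
        },
    },
)

print(vector_store.id, vector_store.status)
```
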
@@ -5812,6 +6272,8 @@ components: file_id: description: A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_id ListVectorStoreFilesResponse: @@ -5881,7 +6343,7 @@ components: status: description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object description: The number of files per status. @@ -5925,6 +6387,8 @@ components: maxItems: 500 items: type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - file_ids AssistantStreamEvent: @@ -6012,7 +6476,7 @@ components: - data RunStepStreamEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. properties: event: $ref: "#/components/schemas/EventType" @@ -6023,7 +6487,7 @@ components: - data RunStepStreamDeltaEvent: type: object - description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/runs/step-object) changes state. + description: Occurs when a new [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) changes state. properties: event: $ref: "#/components/schemas/EventType" @@ -6117,7 +6581,7 @@ components: See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. - Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. + Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: $ref: "#/components/schemas/BatchEndpoint" completion_window: @@ -6130,11 +6594,16 @@ components: nullable: true BatchEndpoint: type: string - enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. Batch: type: object @@ -6144,7 +6613,7 @@ components: type: string object: type: string - enum: [batch] + enum: [ batch ] description: The object type, which is always `batch`. endpoint: $ref: "#/components/schemas/BatchEndpoint" @@ -6264,7 +6733,7 @@ components: description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. method: type: string - enum: ["POST"] + enum: [ "POST" ] description: The HTTP method to be used for the request. Currently only `POST` is supported. 
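
To exercise the Batch additions above (a JSONL input file uploaded with purpose `batch`, a supported endpoint, and the `24h` completion window), a minimal sketch is shown below; the local file path is a placeholder.

```python
from openai import OpenAI

client = OpenAI()

# The input must be a JSONL file uploaded with purpose "batch"
# (up to 50,000 requests and 100 MB in size).
batch_file = client.files.create(
    file=open("requests.jsonl", "rb"),  # placeholder local path
    purpose="batch",
)

batch = client.batches.create(
    input_file_id=batch_file.id,
    endpoint="/v1/chat/completions",  # one of the supported batch endpoints
    completion_window="24h",          # currently the only supported window
)

print(batch.id, batch.status)
```
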
url: type: string @@ -6329,7 +6798,7 @@ components: object: type: string description: The object type, which is always `list`. - enum: [list] + enum: [ list ] required: - object - data diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index fa38d7f7..d9b16b55 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.0.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -16,7 +16,7 @@ tags: - name: Assistants description: Build Assistants that can call models and use tools. - name: Audio - description: Learn how to turn audio into text or text into audio. + description: Turn audio into text or text into audio. - name: Chat description: Given a list of messages comprising a conversation, the model will return a response. - name: Completions @@ -29,12 +29,16 @@ tags: description: Create large batches of API requests to run asynchronously. - name: Files description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. + - name: Uploads + description: Use Uploads to upload large files in multiple parts. - name: Images description: Given a prompt and/or an input image, the model will generate a new image. - name: Models description: List and describe the various models available in the API. - name: Moderations description: Given a input text, outputs if the model classifies it as potentially harmful. + - name: Audit Logs + description: List user actions and configuration changes within this organization. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -87,7 +91,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -95,29 +99,29 @@ paths: {"role": "user", "content": "Hello!"} ] ) - + print(completion.choices[0].message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "system", content: "You are a helpful assistant." 
}], model: "VAR_model_id", }); - + console.log(completion.choices[0]); } - + main(); response: &chat_completion_example | { "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -141,7 +145,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -163,11 +167,11 @@ paths: }' python: | from openai import OpenAI - + client = OpenAI() - + response = client.chat.completions.create( - model="gpt-4-turbo", + model="gpt-4o", messages=[ { "role": "user", @@ -175,23 +179,25 @@ paths: {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + } }, ], } ], max_tokens=300, ) - + print(response.choices[0]) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: [ { role: "user", @@ -199,9 +205,10 @@ paths: { type: "text", text: "What's in this image?" }, { type: "image_url", - image_url: - "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", - }, + image_url: { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + } ], }, ], @@ -214,7 +221,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -254,7 +261,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -263,15 +270,15 @@ paths: ], stream=True ) - + for chunk in completion: print(chunk.choices[0].delta) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ model: "VAR_model_id", @@ -281,21 +288,21 @@ paths: ], stream: true, }); - + for await (const chunk of completion) { console.log(chunk.choices[0].delta.content); } } - + main(); response: &chat_completion_chunk_example | - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} - + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", 
"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + .... - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: curl: | @@ -303,7 +310,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -338,7 +345,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -366,13 +373,13 @@ paths: tools=tools, tool_choice="auto" ) - + print(completion) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; const tools = [ @@ -395,24 +402,24 @@ paths: } } ]; - + const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: messages, tools: tools, tool_choice: "auto", }); - + console.log(response); } - + main(); response: &chat_completion_function_example | { "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1699896916, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -460,7 +467,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -469,14 +476,14 @@ paths: logprobs=True, top_logprobs=2 ) - + print(completion.choices[0].message) print(completion.choices[0].logprobs) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: "Hello!" 
}], @@ -484,17 +491,17 @@ paths: logprobs: true, top_logprobs: 2, }); - + console.log(completion.choices[0]); } - + main(); response: | { "id": "chatcmpl-123", "object": "chat.completion", "created": 1702685778, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -716,7 +723,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -725,9 +732,9 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.completions.create({ model: "VAR_model_id", @@ -735,7 +742,7 @@ paths: max_tokens: 7, temperature: 0, }); - + console.log(completion); } main(); @@ -776,7 +783,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -787,16 +794,16 @@ paths: print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.completions.create({ model: "VAR_model_id", prompt: "Say this is a test.", stream: true, }); - + for await (const chunk of stream) { console.log(chunk.choices[0].text) } @@ -857,7 +864,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.generate( model="dall-e-3", prompt="A cute baby sea otter", @@ -866,12 +873,12 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); - + console.log(image.data); } main(); @@ -923,7 +930,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), @@ -934,16 +941,16 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.edit({ image: fs.createReadStream("otter.png"), mask: fs.createReadStream("mask.png"), prompt: "A cute baby sea otter wearing a beret", }); - + console.log(image.data); } main(); @@ -993,7 +1000,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.images.create_variation( image=open("image_edit_original.png", "rb"), n=2, @@ -1002,14 +1009,14 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.createVariation({ image: fs.createReadStream("otter.png"), }); - + console.log(image.data); } main(); @@ -1063,7 +1070,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.embeddings.create( model="text-embedding-ada-002", input="The food was delicious and the waiter...", @@ -1071,19 +1078,19 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", encoding_format: "float", }); - + console.log(embedding); } - + main(); response: | { @@ -1151,7 +1158,7 @@ paths: python: | from pathlib import Path import openai - + speech_file_path = Path(__file__).parent / "speech.mp3" response = openai.audio.speech.create( model="tts-1", @@ -1163,11 +1170,11 @@ paths: import fs from "fs"; import path from 
"path"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + const speechFile = path.resolve("./speech.mp3"); - + async function main() { const mp3 = await openai.audio.speech.create({ model: "tts-1", @@ -1216,7 +1223,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( model="whisper-1", @@ -1225,15 +1232,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), model: "whisper-1", }); - + console.log(transcription.text); } main(); @@ -1254,7 +1261,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1262,14 +1269,14 @@ paths: response_format="verbose_json", timestamp_granularities=["word"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1277,7 +1284,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["word"] }); - + console.log(transcription.text); } main(); @@ -1314,7 +1321,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1322,14 +1329,14 @@ paths: response_format="verbose_json", timestamp_granularities=["segment"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1337,7 +1344,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["segment"] }); - + console.log(transcription.text); } main(); @@ -1401,7 +1408,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.translations.create( model="whisper-1", @@ -1410,15 +1417,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const translation = await openai.audio.translations.create({ file: fs.createReadStream("speech.mp3"), model: "whisper-1", }); - + console.log(translation.text); } main(); @@ -1459,21 +1466,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.files.list(); - + for await (const file of list) { console.log(file); } } - + main(); response: | { @@ -1503,13 +1510,13 @@ paths: - Files summary: | Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. - + The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - - The Fine-tuning API only supports `.jsonl` files. - - The Batch API only supports `.jsonl` files up to 100 MB in size. - + + The Fine-tuning API only supports `.jsonl` files. 
The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models. + + The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input). + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: required: true @@ -1538,7 +1545,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.create( file=open("mydata.jsonl", "rb"), purpose="fine-tune" @@ -1546,18 +1553,18 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.create({ file: fs.createReadStream("mydata.jsonl"), purpose: "fine-tune", }); - + console.log(file); } - + main(); response: | { @@ -1601,19 +1608,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.del("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1652,19 +1659,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.retrieve("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1707,20 +1714,232 @@ paths: python: | from openai import OpenAI client = OpenAI() - + content = client.files.content("file-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.content("file-abc123"); - + console.log(file); } - + main(); + /uploads: + post: + operationId: createUpload + tags: + - Uploads + summary: | + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. + + Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: + - [Assistants](/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Create upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `pending`. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "purpose": "fine-tune", + "filename": "training_examples.jsonl", + "bytes": 2147483648, + "mime_type": "text/jsonl" + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "pending", + "expires_at": 1719127296 + } + + /uploads/{upload_id}/parts: + post: + operationId: addUploadPart + tags: + - Uploads + summary: | + Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/AddUploadPartRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/UploadPart" + x-oaiMeta: + name: Add upload part + group: uploads + returns: The upload [Part](/docs/api-reference/uploads/part-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/parts + -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..." + response: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719185911, + "upload_id": "upload_abc123" + } + + /uploads/{upload_id}/complete: + post: + operationId: completeUpload + tags: + - Uploads + summary: | + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part IDs. + + The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CompleteUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Complete upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object. 
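
Aside (not part of the spec diff above): the three new Uploads endpoints documented in this hunk — create an Upload, add Parts of at most 64 MB each, then complete it with the ordered Part IDs — together describe a chunked-upload flow that the per-endpoint curl snippets only show in isolation. The sketch below strings them together. It is illustrative only: it assumes the `requests` package, an `OPENAI_API_KEY` environment variable, and a local `training_examples.jsonl` file; the endpoint paths, field names (`purpose`, `filename`, `bytes`, `mime_type`, `data`, `part_ids`), and the 64 MB part limit are taken from the spec text above, everything else is a hypothetical wrapper.

```python
# Illustrative sketch of the Uploads flow described above; not part of the diff.
import os
from pathlib import Path

import requests

API = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
CHUNK = 64 * 1024 * 1024  # each Part may be at most 64 MB


def upload_file(path: str, purpose: str = "fine-tune",
                mime_type: str = "text/jsonl") -> dict:
    file = Path(path)

    # 1. Create the Upload, declaring the total size up front.
    upload = requests.post(
        f"{API}/uploads",
        headers=HEADERS,
        json={
            "purpose": purpose,
            "filename": file.name,
            "bytes": file.stat().st_size,
            "mime_type": mime_type,
        },
    ).json()

    # 2. Add Parts of at most 64 MB each (these could also be sent in parallel).
    part_ids = []
    with file.open("rb") as f:
        while chunk := f.read(CHUNK):
            part = requests.post(
                f"{API}/uploads/{upload['id']}/parts",
                headers=HEADERS,
                files={"data": chunk},  # multipart form field named "data"
            ).json()
            part_ids.append(part["id"])

    # 3. Complete the Upload, passing the Part IDs in their intended order.
    #    The returned Upload embeds the ready-to-use File object.
    return requests.post(
        f"{API}/uploads/{upload['id']}/complete",
        headers=HEADERS,
        json={"part_ids": part_ids},
    ).json()


if __name__ == "__main__":
    completed = upload_file("training_examples.jsonl")
    print(completed["file"]["id"])
```

Note that the total declared `bytes` must match what is actually uploaded before completion, and Parts can no longer be added once the Upload is completed or cancelled (or after it expires, roughly an hour after creation), per the summaries in this hunk.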
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/complete + -d '{ + "part_ids": ["part_def456", "part_ghi789"] + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + + /uploads/{upload_id}/cancel: + post: + operationId: cancelUpload + tags: + - Uploads + summary: | + Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Cancel upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/cancel + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "cancelled", + "expires_at": 1719127296 + } /fine_tuning/jobs: post: @@ -1729,9 +1948,9 @@ paths: - Fine-tuning summary: | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) requestBody: required: true @@ -1759,36 +1978,36 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1804,7 +2023,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "hyperparameters": { "n_epochs": 2 } @@ -1812,36 +2031,36 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo", + model="gpt-4o-mini", hyperparameters={ "n_epochs":2 } ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", - model: "gpt-3.5-turbo", + model: "gpt-4o-mini", hyperparameters: { n_epochs: 2 } }); - + console.log(fineTune); } - + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1859,38 +2078,38 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", validation_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1907,7 +2126,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "integrations": [ { "type": "wandb", @@ -1925,8 +2144,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1983,21 +2202,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = 
await openai.fineTuning.jobs.list(); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2023,7 +2242,7 @@ paths: - Fine-tuning summary: | Get info about a fine-tuning job. - + [Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: - in: path @@ -2053,19 +2272,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); - + console.log(fineTune); } - + main(); response: &fine_tuning_example | { @@ -2140,24 +2359,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list_events( fine_tuning_job_id="ftjob-abc123", limit=2 ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2166,7 +2385,7 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", - "created_at": 1692407401, + "created_at": 1721764800, "level": "info", "message": "Fine tuning job successfully completed", "data": null, @@ -2175,9 +2394,9 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-tyiGuB72evQncpH87xe505Sv", - "created_at": 1692407400, + "created_at": 1721764800, "level": "info", - "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel", + "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel", "data": null, "type": "message" } @@ -2219,16 +2438,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.cancel("ftjob-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); - + console.log(fineTune); } main(); @@ -2236,8 +2455,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1689376978, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2300,8 +2519,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", - "created_at": 1519129973, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", + "created_at": 1721764867, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", "metrics": { "full_valid_loss": 0.134, "full_valid_mean_token_accuracy": 0.874 @@ -2312,8 +2531,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", - "created_at": 1519129833, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "created_at": 1721764800, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", "metrics": { "full_valid_loss": 0.167, "full_valid_mean_token_accuracy": 0.781 @@ -2352,16 +2571,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.models.list(); 
- + for await (const model of list) { console.log(model); } @@ -2405,7 +2624,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -2426,19 +2645,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.retrieve("VAR_model_id"); - + console.log(model); } - + main(); response: &retrieve_model_response | { @@ -2458,7 +2677,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -2474,28 +2693,28 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \ -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | from openai import OpenAI client = OpenAI() - - client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + + client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { - const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); - + const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); + console.log(model); } main(); response: | { - "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", "object": "model", "deleted": true } @@ -2535,24 +2754,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + moderation = client.moderations.create(input="I want to kill them.") print(moderation) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const moderation = await openai.moderations.create({ input: "I want to kill them." 
}); - + console.log(moderation); } main(); response: &moderation_example | { "id": "modr-XXXXX", - "model": "text-moderation-005", + "model": "text-moderation-007", "results": [ { "flagged": true, @@ -2608,7 +2827,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: &pagination_after_param_description | @@ -2643,7 +2862,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistants = client.beta.assistants.list( order="desc", limit="20", @@ -2651,18 +2870,18 @@ paths: print(my_assistants.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistants = await openai.beta.assistants.list({ order: "desc", limit: "20", }); - + console.log(myAssistants.data); } - + main(); response: &list_assistants_example | { @@ -2674,7 +2893,7 @@ paths: "created_at": 1698982736, "name": "Coding Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2689,7 +2908,7 @@ paths: "created_at": 1698982718, "name": "My Assistant", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2704,7 +2923,7 @@ paths: "created_at": 1698982643, "name": null, "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [], "tool_resources": {}, @@ -2753,37 +2972,37 @@ paths: "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name="Math Tutor", tools=[{"type": "code_interpreter"}], - model="gpt-4-turbo", + model="gpt-4o", ) print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name: "Math Tutor", tools: [{ type: "code_interpreter" }], - model: "gpt-4-turbo", + model: "gpt-4o", }); - + console.log(myAssistant); } - + main(); response: &create_assistants_example | { @@ -2792,7 +3011,7 @@ paths: "created_at": 1698984975, "name": "Math Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", "tools": [ { @@ -2815,25 +3034,25 @@ paths: "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [{"type": "file_search"}], "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", name="HR Helper", tools=[{"type": "file_search"}], tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, - model="gpt-4-turbo" + model="gpt-4o" ) print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2845,12 +3064,12 @@ paths: vector_store_ids: ["vs_123"] } }, - model: "gpt-4-turbo" + model: "gpt-4o" }); - + console.log(myAssistant); } - + main(); response: | { @@ -2859,7 +3078,7 @@ paths: "created_at": 1699009403, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -2912,22 +3131,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.retrieve("asst_abc123") print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.retrieve( "asst_abc123" ); - + console.log(myAssistant); } - + main(); response: | { @@ -2936,7 +3155,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -2988,26 +3207,26 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [{"type": "file_search"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI client = OpenAI() - + my_updated_assistant = client.beta.assistants.update( "asst_abc123", instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name="HR Helper", tools=[{"type": "file_search"}], - model="gpt-4-turbo" + model="gpt-4o" ) - + print(my_updated_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myUpdatedAssistant = await openai.beta.assistants.update( "asst_abc123", @@ -3016,13 +3235,13 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", name: "HR Helper", tools: [{ type: "file_search" }], - model: "gpt-4-turbo" + model: "gpt-4o" } ); - + console.log(myUpdatedAssistant); } - + main(); response: | { @@ -3031,7 +3250,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [ { @@ -3083,17 +3302,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.assistants.delete("asst_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.assistants.del("asst_abc123"); - + console.log(response); } main(); @@ -3139,20 +3358,20 @@ paths: python: | from openai import OpenAI client = OpenAI() - + empty_thread = client.beta.threads.create() print(empty_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const emptyThread = await openai.beta.threads.create(); - + console.log(emptyThread); } - + main(); response: | { @@ -3181,7 +3400,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message_thread = client.beta.threads.create( messages=[ { @@ -3194,13 +3413,13 @@ paths: }, ] ) - + print(message_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messageThread = await openai.beta.threads.create({ messages: [ @@ -3214,10 +3433,10 @@ paths: }, ], }); - + console.log(messageThread); } - + main(); response: | { @@ -3263,22 +3482,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_thread = client.beta.threads.retrieve("thread_abc123") print(my_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myThread = await openai.beta.threads.retrieve( "thread_abc123" ); - + console.log(myThread); } - + main(); response: | { @@ -3338,7 +3557,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_thread = client.beta.threads.update( "thread_abc123", metadata={ @@ -3349,9 +3568,9 @@ paths: print(my_updated_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const updatedThread = await openai.beta.threads.update( "thread_abc123", @@ -3359,10 +3578,10 @@ paths: metadata: { modified: "true", user: "abc123" }, } ); - + console.log(updatedThread); } - + main(); response: | { @@ -3410,17 +3629,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.threads.delete("thread_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.threads.del("thread_abc123"); - + console.log(response); } main(); @@ -3457,7 +3676,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -3496,22 +3715,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_messages = client.beta.threads.messages.list("thread_abc123") print(thread_messages.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = 
await openai.beta.threads.messages.list( "thread_abc123" ); - + console.log(threadMessages.data); } - + main(); response: | { @@ -3606,7 +3825,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_message = client.beta.threads.messages.create( "thread_abc123", role="user", @@ -3615,18 +3834,18 @@ paths: print(thread_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.create( "thread_abc123", { role: "user", content: "How does AI work? Explain it in simple terms." } ); - + console.log(threadMessages); } - + main(); response: | { @@ -3680,7 +3899,7 @@ paths: name: Retrieve message group: threads beta: true - returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID. + returns: The [message](/docs/api-reference/messages/object) object matching the specified ID. examples: request: curl: | @@ -3691,7 +3910,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.retrieve( message_id="msg_abc123", thread_id="thread_abc123", @@ -3699,18 +3918,18 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.retrieve( "thread_abc123", "msg_abc123" ); - + console.log(message); } - + main(); response: | { @@ -3768,7 +3987,7 @@ paths: name: Modify message group: threads beta: true - returns: The modified [message](/docs/api-reference/threads/messages/object) object. + returns: The modified [message](/docs/api-reference/messages/object) object. examples: request: curl: | @@ -3785,7 +4004,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.update( message_id="msg_abc12", thread_id="thread_abc123", @@ -3797,9 +4016,9 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.update( "thread_abc123", @@ -3875,7 +4094,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_message = client.beta.threads.messages.delete( message_id="msg_abc12", thread_id="thread_abc123", @@ -3883,15 +4102,15 @@ paths: print(deleted_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const deletedMessage = await openai.beta.threads.messages.del( "thread_abc123", "msg_abc123" ); - + console.log(deletedMessage); } response: | @@ -3901,7 +4120,6 @@ paths: "deleted": true } - /threads/runs: post: operationId: createThreadAndRun @@ -3945,7 +4163,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.create_and_run( assistant_id="asst_abc123", thread={ @@ -3954,13 +4172,13 @@ paths: ] } ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.createAndRun({ assistant_id: "asst_abc123", @@ -3970,10 +4188,10 @@ paths: ], }, }); - + console.log(run); } - + main(); response: | { @@ -3990,7 +4208,7 @@ paths: "completed_at": null, "required_action": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant.", "tools": [], "tool_resources": {}, @@ -4006,7 +4224,8 @@ paths: "incomplete_details": null, "usage": null, "response_format": "auto", - 
"tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming @@ -4028,7 +4247,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.create_and_run( assistant_id="asst_123", thread={ @@ -4038,14 +4257,14 @@ paths: }, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4056,58 +4275,58 @@ paths: }, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} - + event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}], "metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed - {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} - + {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + event: done data: [DONE] @@ -4153,7 +4372,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4174,7 +4393,7 @@ paths: } } ] - + stream = client.beta.threads.create_and_run( thread={ "messages": [ @@ -4185,14 +4404,14 @@ paths: tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4213,7 +4432,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4225,52 +4444,52 @@ paths: tools: tools, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} - + event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} - + ... - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} - + event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: done data: [DONE] @@ -4300,7 +4519,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -4333,25 +4552,25 @@ paths: python: | from openai import OpenAI client = OpenAI() - + runs = client.beta.threads.runs.list( "thread_abc123" ) - + print(runs) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const runs = await openai.beta.threads.runs.list( "thread_abc123" ); - + console.log(runs); } - + main(); response: | { @@ -4370,7 +4589,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4401,7 +4620,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true }, { "id": "run_abc456", @@ -4416,7 +4636,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4447,7 +4667,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } ], "first_id": "run_abc123", @@ -4466,6 +4687,17 @@ paths: schema: type: string description: The ID of the thread to run. + - name: include[] + in: query + description: &include_param_description | + A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. 
+ schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] requestBody: required: true content: @@ -4498,27 +4730,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.create( "thread_abc123", { assistant_id: "asst_abc123" } ); - + console.log(run); } - + main(); response: &run_object_example | { @@ -4534,7 +4766,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4553,7 +4785,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming request: @@ -4569,74 +4802,74 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.create( thread_id="thread_123", assistant_id="asst_123", stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_123", { assistant_id: "asst_123", stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: done data: [DONE] @@ -4677,7 +4910,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4698,21 +4931,21 @@ paths: } } ] - + stream = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123", tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4733,7 +4966,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_abc123", @@ -4743,55 +4976,55 @@ paths: stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: done data: [DONE] @@ -4835,27 +5068,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.retrieve( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.retrieve( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -4871,7 +5104,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4894,7 +5127,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } post: operationId: modifyRun @@ -4947,19 +5181,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.update( thread_id="thread_abc123", run_id="run_abc123", metadata={"user_id": "user_abc123"}, ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.update( "thread_abc123", @@ -4970,10 +5204,10 @@ paths: }, } ); - + console.log(run); } - + main(); response: | { @@ -4989,7 +5223,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -5022,7 +5256,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + 
"tool_choice": "auto", + "parallel_tool_calls": true } /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: @@ -5082,7 +5317,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5093,13 +5328,13 @@ paths: } ] ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5113,10 +5348,10 @@ paths: ], } ); - + console.log(run); } - + main(); response: | { @@ -5132,7 +5367,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [ { @@ -5168,7 +5403,8 @@ paths: "last_messages": null }, "response_format": "auto", - "tool_choice": "auto" + "tool_choice": "auto", + "parallel_tool_calls": true } - title: Streaming @@ -5190,7 +5426,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5202,14 +5438,14 @@ paths: ], stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5223,61 +5459,61 @@ paths: ], } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} - + event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.message.created data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} - + event: thread.message.completed data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} - + event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + event: done data: [DONE] @@ -5322,27 +5558,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.cancel( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.cancel( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -5358,7 +5594,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You summarize books.", "tools": [ { @@ -5374,7 +5610,9 @@ paths: "usage": null, "temperature": 1.0, "top_p": 1.0, - "response_format": "auto" + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } /threads/{thread_id}/runs/{run_id}/steps: @@ -5409,7 +5647,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5420,7 +5658,15 @@ paths: description: *pagination_before_param_description schema: type: string - responses: + - name: include[] + in: query + description: *include_param_description + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] + responses: "200": description: OK content: @@ -5431,7 +5677,7 @@ paths: name: List run steps group: threads beta: true - returns: A list of [run step](/docs/api-reference/runs/step-object) objects. + returns: A list of [run step](/docs/api-reference/run-steps/step-object) objects. examples: request: curl: | @@ -5442,17 +5688,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_steps = client.beta.threads.runs.steps.list( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run_steps) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.list( "thread_abc123", @@ -5460,7 +5706,7 @@ paths: ); console.log(runStep); } - + main(); response: | { @@ -5523,6 +5769,14 @@ paths: schema: type: string description: The ID of the run step to retrieve. + - name: include[] + in: query + description: *include_param_description + schema: + type: array + items: + type: string + enum: [ "step_details.tool_calls[*].file_search.results[*].content" ] responses: "200": description: OK @@ -5534,7 +5788,7 @@ paths: name: Retrieve run step group: threads beta: true - returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID. + returns: The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID. 
examples: request: curl: | @@ -5545,18 +5799,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_step = client.beta.threads.runs.steps.retrieve( thread_id="thread_abc123", run_id="run_abc123", step_id="step_abc123" ) - + print(run_step) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( "thread_abc123", @@ -5565,7 +5819,7 @@ paths: ); console.log(runStep); } - + main(); response: &run_step_object_example | { @@ -5615,7 +5869,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -5648,18 +5902,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_stores = client.beta.vector_stores.list() print(vector_stores) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStores = await openai.beta.vectorStores.list(); console.log(vectorStores); } - + main(); response: | { @@ -5734,7 +5988,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.create( name="Support FAQ" ) @@ -5742,14 +5996,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.create({ name: "Support FAQ" }); console.log(vectorStore); } - + main(); response: | { @@ -5802,7 +6056,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.retrieve( vector_store_id="vs_abc123" ) @@ -5810,14 +6064,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.retrieve( "vs_abc123" ); console.log(vectorStore); } - + main(); response: | { @@ -5868,7 +6122,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.update( vector_store_id="vs_abc123", name="Support FAQ" @@ -5877,7 +6131,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.update( "vs_abc123", @@ -5887,7 +6141,7 @@ paths: ); console.log(vectorStore); } - + main(); response: | { @@ -5940,7 +6194,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store = client.beta.vector_stores.delete( vector_store_id="vs_abc123" ) @@ -5948,14 +6202,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStore = await openai.beta.vectorStores.del( "vs_abc123" ); console.log(deletedVectorStore); } - + main(); response: | { @@ -5990,7 +6244,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6006,7 +6260,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." 
schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6029,7 +6283,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.files.list( vector_store_id="vs_abc123" ) @@ -6037,14 +6291,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.files.list( "vs_abc123" ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6112,7 +6366,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6121,7 +6375,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFile = await openai.beta.vectorStores.files.create( "vs_abc123", @@ -6131,7 +6385,7 @@ paths: ); console.log(myVectorStoreFile); } - + main(); response: | { @@ -6187,7 +6441,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.retrieve( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6196,7 +6450,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( "vs_abc123", @@ -6204,7 +6458,7 @@ paths: ); console.log(vectorStoreFile); } - + main(); response: | { @@ -6256,7 +6510,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file = client.beta.vector_stores.files.delete( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6265,7 +6519,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFile = await openai.beta.vectorStores.files.del( "vs_abc123", @@ -6273,7 +6527,7 @@ paths: ); console.log(deletedVectorStoreFile); } - + main(); response: | { @@ -6328,7 +6582,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["file-abc123", "file-abc456"] @@ -6337,7 +6591,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create( "vs_abc123", @@ -6347,7 +6601,7 @@ paths: ); console.log(myVectorStoreFileBatch); } - + main(); response: | { @@ -6408,7 +6662,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6417,7 +6671,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve( "vs_abc123", @@ -6425,7 +6679,7 @@ paths: ); console.log(vectorStoreFileBatch); } - + main(); response: | { @@ -6485,7 +6739,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel( vector_store_id="vs_abc123", file_batch_id="vsfb_abc123" @@ -6494,7 +6748,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async 
function main() { const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel( "vs_abc123", @@ -6502,7 +6756,7 @@ paths: ); console.log(deletedVectorStoreFileBatch); } - + main(); response: | { @@ -6552,7 +6806,7 @@ paths: schema: type: string default: desc - enum: ["asc", "desc"] + enum: [ "asc", "desc" ] - name: after in: query description: *pagination_after_param_description @@ -6568,7 +6822,7 @@ paths: description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`." schema: type: string - enum: ["in_progress", "completed", "failed", "cancelled"] + enum: [ "in_progress", "completed", "failed", "cancelled" ] responses: "200": description: OK @@ -6591,7 +6845,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.file_batches.list_files( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6600,7 +6854,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles( "vs_abc123", @@ -6608,7 +6862,7 @@ paths: ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6653,17 +6907,22 @@ paths: type: string description: | The ID of an uploaded file that contains requests for the new batch. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. completion_window: type: string - enum: ["24h"] + enum: [ "24h" ] description: The time frame within which the batch should be processed. Currently only `24h` is supported. 
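The `input_file_id` description above requires the uploaded file to follow the JSONL request-input format. As a hedged illustration (field names follow the batch request-input documentation linked above; the model and messages are placeholders), each line of the file is one self-contained request, the file is uploaded with `purpose="batch"`, and the batch is then created against the chosen endpoint:

```python
import json
from openai import OpenAI

client = OpenAI()

# One JSON object per line; each line is an independent request.
requests = [
    {
        "custom_id": "request-1",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello!"}]},
    },
]
with open("batch_input.jsonl", "w") as f:
    for item in requests:
        f.write(json.dumps(item) + "\n")

# Upload with purpose "batch", then create the batch for the chat completions endpoint.
batch_file = client.files.create(file=open("batch_input.jsonl", "rb"), purpose="batch")
batch = client.batches.create(
    input_file_id=batch_file.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
)
print(batch)
```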
metadata: type: object @@ -6696,7 +6955,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.create( input_file_id="file-abc123", endpoint="/v1/chat/completions", @@ -6704,19 +6963,19 @@ paths: ) node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.create({ input_file_id: "file-abc123", endpoint: "/v1/chat/completions", completion_window: "24h" }); - + console.log(batch); } - + main(); response: | { @@ -6787,21 +7046,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.list() node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.batches.list(); - + for await (const batch of list) { console.log(batch); } } - + main(); response: | { @@ -6876,19 +7135,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.retrieve("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.retrieve("batch_abc123"); - + console.log(batch); } - + main(); response: &batch_object | { @@ -6926,7 +7185,7 @@ paths: operationId: cancelBatch tags: - Batch - summary: Cancels an in-progress batch. + summary: Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file. parameters: - in: path name: batch_id @@ -6955,19 +7214,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.cancel("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.cancel("batch_abc123"); - + console.log(batch); } - + main(); response: | { @@ -7000,2725 +7259,5629 @@ paths: } } -components: - securitySchemes: - ApiKeyAuth: - type: http - scheme: "bearer" - - schemas: - Error: - type: object - properties: - code: - type: string - nullable: true - message: - type: string - nullable: false - param: - type: string - nullable: true - type: - type: string - nullable: false - required: - - type - - message - - param - - code - ErrorResponse: - type: object - properties: - error: - $ref: "#/components/schemas/Error" - required: - - error - - ListModelsResponse: - type: object - properties: - object: - type: string - enum: [list] - data: - type: array - items: - $ref: "#/components/schemas/Model" - required: - - object - - data - DeleteModelResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - required: - - id - - object - - deleted - - CreateCompletionRequest: - type: object - properties: - model: - description: &model_description | - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
- - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true - oneOf: - - type: string - default: "" - example: "This is a test." - - type: array - items: - type: string - default: "" - example: "This is a test." - - type: array - minItems: 1 - items: + # Organization + # Audit Logs List + /organization/audit_logs: + get: + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs + tags: + - Audit Logs + parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this range. + required: false + schema: + type: object + properties: + gt: type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - best_of: - type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - echo: - type: boolean - default: false - nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_frequency_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - logit_bias: &completions_logit_bias - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: + description: Return only events whose `effective_at` (Unix seconds) is greater than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. + - name: project_ids[] + in: query + description: Return only events for these projects. + required: false + schema: + type: array + items: + type: string + - name: event_types[] + in: query + description: Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object). + required: false + schema: + type: array + items: + $ref: "#/components/schemas/AuditLogEventType" + - name: actor_ids[] + in: query + description: Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID. 
+ required: false + schema: + type: array + items: + type: string + - name: actor_emails[] + in: query + description: Return only events performed by users with these emails. + required: false + schema: + type: array + items: + type: string + - name: resource_ids[] + in: query + description: Return only events performed on these targets. For example, a project ID updated. + required: false + schema: + type: array + items: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: type: integer - description: &completions_logit_bias_description | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - logprobs: &completions_logprobs_configuration - type: integer - minimum: 0 + default: 20 + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: Audit logs listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListAuditLogsResponse" + x-oaiMeta: + name: List audit logs + group: audit-logs + returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/audit_logs \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + response: | + { + "object": "list", + "data": [ + { + "id": "audit_log-xxx_yyyymmdd", + "type": "project.archived", + "effective_at": 1722461446, + "actor": { + "type": "api_key", + "api_key": { + "type": "user", + "user": { + "id": "user-xxx", + "email": "user@example.com" + } + } + }, + "project.archived": { + "id": "proj_abc" + }, + }, + { + "id": "audit_log-yyy__20240101", + "type": "api_key.updated", + "effective_at": 1720804190, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.updated": { + "id": "key_xxxx", + "data": { + "scopes": ["resource_2.operation_2"] + } + }, + } + ], + "first_id": "audit_log-xxx__20240101", + "last_id": "audit_log_yyy__20240101", + "has_more": true + } + /organization/invites: + get: + summary: Returns a list of invites in the organization. + operationId: list-invites + tags: + - Invites + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Invites listed successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/InviteListResponse' + x-oaiMeta: + name: List invites + group: administration + returns: A list of [Invite](/docs/api-reference/invite/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + ], + "first_id": "invite-abc", + "last_id": "invite-abc", + "has_more": false + } + + post: + summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. + operationId: inviteUser + tags: + - Invites + requestBody: + description: The invite request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InviteRequest' + responses: + "200": + description: User invited successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Create invite + group: administration + returns: The created [Invite](/docs/api-reference/invite/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/invites \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "role": "owner" + }' + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": null + } + + /organization/invites/{invite_id}: + get: + summary: Retrieves an invite. + operationId: retrieve-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to retrieve. + responses: + "200": + description: Invite retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Retrieve invite + group: administration + returns: The [Invite](/docs/api-reference/invite/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + delete: + summary: Delete an invite. If the invite has already been accepted, it cannot be deleted. + operationId: delete-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to delete. + responses: + "200": + description: Invite deleted successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/InviteDeleteResponse' + x-oaiMeta: + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite.deleted", + "id": "invite-abc", + "deleted": true + } + + /organization/users: + get: + summary: Lists all of the users in the organization. + operationId: list-users + tags: + - Users + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserListResponse' + x-oaiMeta: + name: List users + group: administration + returns: A list of [User](/docs/api-reference/users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + + /organization/users/{user_id}: + get: + summary: Retrieves a user by their identifier. + operationId: retrieve-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Retrieve user + group: administration + returns: The [User](/docs/api-reference/users/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + post: + summary: Modifies a user's role in the organization. + operationId: modify-user + tags: + - Users + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserRoleUpdateRequest' + responses: + "200": + description: User role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Modify user + group: administration + returns: The updated [User](/docs/api-reference/users/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + delete: + summary: Deletes a user from the organization. + operationId: delete-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserDeleteResponse' + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user.deleted", + "id": "user_abc", + "deleted": true + } + /organization/projects: + get: + summary: Returns a list of projects. + operationId: list-projects + tags: + - Projects + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + - name: include_archived + in: query + schema: + type: boolean + default: false + description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + responses: + "200": + description: Projects listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectListResponse' + x-oaiMeta: + name: List projects + group: administration + returns: A list of [Project](/docs/api-reference/projects/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ], + "first_id": "proj-abc", + "last_id": "proj-xyz", + "has_more": false + } + + post: + summary: Create a new project in the organization. Projects can be created and archived, but cannot be deleted. + operationId: create-project + tags: + - Projects + requestBody: + description: The project create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectCreateRequest' + responses: + "200": + description: Project created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Create project + group: administration + returns: The created [Project](/docs/api-reference/projects/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project ABC", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + }' + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project ABC", + "created_at": 1711471533, + "archived_at": null, + "status": "active", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + } + + /organization/projects/{project_id}: + get: + summary: Retrieves a project. + operationId: retrieve-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: The [Project](/docs/api-reference/projects/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + + post: + summary: Modifies a project in the organization. + operationId: modify-project + tags: + - Projects + requestBody: + description: The project update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpdateRequest' + responses: + "200": + description: Project updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + "400": + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project + group: administration + returns: The updated [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project DEF", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + }' + + /organization/projects/{project_id}/archive: + post: + summary: Archives a project in the organization. Archived projects cannot be used or updated. + operationId: archive-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project archived successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Archive project + group: administration + returns: The archived [Project](/docs/api-reference/projects/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project DEF", + "created_at": 1711471533, + "archived_at": 1711471533, + "status": "archived" + } + + + /organization/projects/{project_id}/users: + get: + summary: Returns a list of users in the project. + operationId: list-project-users + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project users + group: administration + returns: A list of [ProjectUser](/docs/api-reference/project-users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + post: + summary: Adds a user to the project. Users must already be members of the organization to be added to a project. + operationId: create-project-user + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + tags: + - Projects + requestBody: + description: The project user create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserCreateRequest' + responses: + "200": + description: User added to project successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project user + group: administration + returns: The created [ProjectUser](/docs/api-reference/project-users/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "user_id": "user_abc", + "role": "member" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + /organization/projects/{project_id}/users/{user_id}: + get: + summary: Retrieves a user in the project. + operationId: retrieve-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: Project user retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + x-oaiMeta: + name: Retrieve project user + group: administration + returns: The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + post: + summary: Modifies a user's role in the project. + operationId: modify-project-user + tags: + - Projects + requestBody: + description: The project user update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserUpdateRequest' + responses: + "200": + description: Project user's role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project user + group: administration + returns: The updated [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + delete: + summary: Deletes a user from the project. + operationId: delete-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: Project user deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserDeleteResponse' + "400": + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project user + group: administration + returns: Confirmation that project has been deleted or an error in case of an archived project, which has no users + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user.deleted", + "id": "user_abc", + "deleted": true + } + + /organization/projects/{project_id}/service_accounts: + get: + summary: Returns a list of service accounts in the project. + operationId: list-project-service-accounts + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project service accounts listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project service accounts + group: administration + returns: A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ], + "first_id": "svc_acct_abc", + "last_id": "svc_acct_xyz", + "has_more": false + } + + post: + summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. + operationId: create-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project service account create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' + responses: + "200": + description: Project service account created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project service account + group: administration + returns: The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Production App" + }' + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Production App", + "role": "member", + "created_at": 1711471533, + "api_key": { + "object": "organization.project.service_account.api_key", + "value": "sk-abcdefghijklmnop123", + "name": "Secret Key", + "created_at": 1711471533, + "id": "key_abc" + } + } + + /organization/projects/{project_id}/service_accounts/{service_account_id}: + get: + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccount' + x-oaiMeta: + name: Retrieve project service account + group: administration + returns: The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + delete: + summary: Deletes a service account from the project. + operationId: delete-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' + x-oaiMeta: + name: Delete project service account + group: administration + returns: Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account.deleted", + "id": "svc_acct_abc", + "deleted": true + } + + /organization/projects/{project_id}/api_keys: + get: + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project API keys listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyListResponse' + + x-oaiMeta: + name: List project API keys + group: administration + returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + ], + "first_id": "key_abc", + "last_id": "key_xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + + /organization/projects/{project_id}/api_keys/{key_id}: + get: + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKey' + x-oaiMeta: + name: Retrieve project API key + group: administration + returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + + delete: + summary: Deletes an API key from the project. + operationId: delete-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' + "400": + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a service account + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key.deleted", + "id": "key_abc", + "deleted": true + } + error_response: + content: | + { + "code": 400, + "message": "API keys cannot be deleted for service accounts, please delete the service account" + } + +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" + + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + + ListModelsResponse: + type: object + properties: + object: + type: string + enum: [ list ] + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: [ "gpt-3.5-turbo-instruct", "davinci-002", "babbage-002" ] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
+          echo:
+            type: boolean
+            default: false
+            nullable: true
+            description: &completions_echo_description >
+              Echo back the prompt in addition to the completion
+          frequency_penalty:
+            type: number
+            default: 0
+            minimum: -2
+            maximum: 2
+            nullable: true
+            description: &completions_frequency_penalty_description |
+              Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+          logit_bias: &completions_logit_bias
+            type: object
+            x-oaiTypeLabel: map
+            default: null
+            nullable: true
+            additionalProperties:
+              type: integer
+            description: &completions_logit_bias_description |
+              Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
+          logprobs: &completions_logprobs_configuration
+            type: integer
+            minimum: 0
+            maximum: 5
+            default: null
+            nullable: true
+            description: &completions_logprobs_description |
+              Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+          max_tokens:
+            type: integer
+            minimum: 0
+            default: 16
+            example: 16
+            nullable: true
+            description: &completions_max_tokens_description |
+              The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
+          n:
+            type: integer
+            minimum: 1
+            maximum: 128
+            default: 1
+            example: 1
+            nullable: true
+            description: &completions_completions_description |
+              How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+          presence_penalty:
+            type: number
+            default: 0
+            minimum: -2
+            maximum: 2
+            nullable: true
+            description: &completions_presence_penalty_description |
+              Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+ + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: [ "stop", "length", "content_filter" ] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: [ text_completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } - The maximum value for `logprobs` is 5. - max_tokens: - type: integer - minimum: 0 - default: 16 - example: 16 + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: [ "text" ] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: + type: string + enum: [ "image_url" ] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - url + required: + - type + - image_url + + ChatCompletionRequestMessageContentPartRefusal: + type: object + title: Refusal content part + properties: + type: + type: string + enum: [ "refusal" ] + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. 
+ required: + - type + - refusal + + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + oneOf: + - type: string + description: The contents of the system message. + title: Text content + - type: array + description: An array of content parts with a defined type. For system messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 + role: + type: string + enum: [ "system" ] + description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestUserMessage: + type: object + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: + type: string + enum: [ "user" ] + description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role + + ChatCompletionRequestAssistantMessage: + type: object + title: Assistant message + properties: + content: nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) that can be generated in the completion. + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. 
Can be one or more of type `text`, or exactly one of type `refusal`. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 + description: | + The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. + role: + type: string + enum: [ "assistant" ] + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + nullable: true + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + required: + - role - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - n: - type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: [ 0, 1 ] + description: "Controls whether the assistant message is trained against (0 or 1)" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + + ChatCompletionRequestToolMessage: + type: object + title: Tool message + properties: + role: + type: string + enum: [ "tool" ] + description: The role of the messages author, in this case `tool`. + content: + oneOf: + - type: string + description: The contents of the tool message. + title: Text content + - type: array + description: An array of content parts with a defined type. For tool messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + minItems: 1 + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: + type: string + enum: [ "function" ] + description: The role of the messages author, in this case `function`. + content: nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name + + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + additionalProperties: true + + ChatCompletionFunctions: + type: object + deprecated: true + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + properties: + name: + type: string + description: The name of the function to call. + required: + - name - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - seed: &completions_seed_param - type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + ChatCompletionTool: + type: object + properties: + type: + type: string + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - default: null - nullable: true - oneOf: - - type: string - default: <|endoftext|> - example: "\n" - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - example: '["\n"]' - stream: - description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + FunctionObject: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + strict: type: boolean nullable: true default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - suffix: - description: | - The suffix that comes after a completion of inserted text. + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). + required: + - name - This parameter is only supported for `gpt-3.5-turbo-instruct`. - default: null - nullable: true + ResponseFormatText: + type: object + properties: + type: type: string - example: "test." - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + description: "The type of response format being defined: `text`" + enum: [ "text" ] + required: + - type - We generally recommend altering this or `temperature` but not both. - user: &end_user_param_configuration + ResponseFormatJsonObject: + type: object + properties: + type: type: string - example: user-1234 - description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + description: "The type of response format being defined: `json_object`" + enum: [ "json_object" ] required: - - model - - prompt + - type - CreateCompletionResponse: + ResponseFormatJsonSchemaSchema: + type: object + description: "The schema for the response format, described as a JSON Schema object." + additionalProperties: true + + ResponseFormatJsonSchema: type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). properties: - id: - type: string - description: A unique identifier for the completion. - choices: - type: array - description: The list of completion choices the model generated for the input prompt. - items: - type: object - required: - - finish_reason - - index - - logprobs - - text - properties: - finish_reason: - type: string - description: &completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. 
- enum: ["stop", "length", "content_filter"] - index: - type: integer - logprobs: - type: object - nullable: true - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - text: - type: string - created: - type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for completion. - system_fingerprint: + type: type: string - description: | - This fingerprint represents the backend configuration that the model runs with. + description: 'The type of response format being defined: `json_schema`' + enum: [ 'json_schema' ] + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). + required: + - type + - name + required: + - type + - json_schema - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: + ChatCompletionToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + enum: [ none, auto, required ] + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific function. 
+ properties: + type: type: string - description: The object type, which is always "text_completion" - enum: [text_completion] - usage: - $ref: "#/components/schemas/CompletionUsage" - required: - - id - - object - - created - - model - - choices - x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-4-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function - ChatCompletionRequestMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true + ParallelToolCalls: + description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + type: boolean + default: true - ChatCompletionRequestMessageContentPartImage: + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: type: object - title: Image content part properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. type: type: string - enum: ["image_url"] - description: The type of the content part. - image_url: + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: type: object + description: The function that the model called. properties: - url: + name: type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: + description: The name of the function to call. + arguments: type: string - description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: ["auto", "low", "high"] - default: "auto" + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. required: - - url + - name + - arguments required: + - id - type - - image_url + - function - ChatCompletionRequestMessageContentPartText: + ChatCompletionMessageToolCallChunk: type: object - title: Text content part properties: - type: + index: + type: integer + id: type: string - enum: ["text"] - description: The type of the content part. - text: + description: The ID of the tool call. + type: type: string - description: The text content. + enum: [ "function" ] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. required: - - type - - text + - index - ChatCompletionRequestMessage: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function - ChatCompletionRequestSystemMessage: + ChatCompletionStreamOptions: + description: | + Options for streaming response. Only set this when you set `stream: true`. type: object - title: System message + nullable: true + default: null + properties: + include_usage: + type: boolean + description: | + If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. properties: content: - description: The contents of the system message. type: string - role: + description: The contents of the message. + nullable: true + refusal: type: string - enum: ["system"] - description: The role of the messages author, in this case `system`. - name: + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + enum: [ "assistant" ] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments required: - - content - role + - content + - refusal - ChatCompletionRequestUserMessage: + ChatCompletionStreamResponseDelta: type: object - title: User message + description: A chat completion delta generated by streamed model responses. properties: content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." 
+ properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: [ "system", "user", "assistant", "tool" ] + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + type: integer + minimum: 0 + maximum: 20 + nullable: true + max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. 
This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). + type: integer + nullable: true + deprecated: true + max_completion_tokens: + description: | + An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + type: integer + nullable: true + + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + response_format: + description: | + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + seed: + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true description: | - The contents of the user message. + This feature is in Beta. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. 
+ + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: [ "auto", "default" ] + nullable: true + default: null + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null oneOf: - type: string - description: The text contents of the message. - title: Text content + nullable: true - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-visual-preview` model. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" minItems: 1 - x-oaiExpandable: true - role: - type: string - enum: ["user"] - description: The role of the messages author, in this case `user`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - required: - - content - - role - - ChatCompletionRequestAssistantMessage: - type: object - title: Assistant message - properties: - content: + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean nullable: true - type: string - description: | - The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - role: - type: string - enum: ["assistant"] - description: The role of the messages author, in this case `assistant`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + user: *end_user_param_configuration function_call: - type: object deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - arguments - - name - required: - - role - - ChatCompletionRequestToolMessage: - type: object - title: Tool message - properties: - role: - type: string - enum: ["tool"] - description: The role of the messages author, in this case `tool`. - content: - type: string - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. - required: - - role - - content - - tool_call_id - - ChatCompletionRequestFunctionMessage: - type: object - title: Function message - deprecated: true - properties: - role: - type: string - enum: ["function"] - description: The role of the messages author, in this case `function`. - content: - nullable: true - type: string - description: The contents of the function message. - name: - type: string - description: The name of the function to call. - required: - - role - - content - - name - - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - additionalProperties: true - - ChatCompletionFunctions: - type: object - deprecated: true - properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - parameters: - $ref: "#/components/schemas/FunctionParameters" - required: - - name + description: | + Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [ none, auto ] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" - ChatCompletionFunctionCallOption: - type: object - description: > - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - properties: - name: - type: string - description: The name of the function to call. 
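(Illustrative aside, not part of the patch.) The `tools`, `tool_choice`, and `parallel_tool_calls` fields added to `CreateChatCompletionRequest` above map directly onto a chat-completions request body. A hedged sketch using the official OpenAI Python SDK; the weather function name and its parameters are made up for the example:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",                  # hypothetical function
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=tools,
    tool_choice="auto",       # or {"type": "function", "function": {"name": "get_weather"}}
    parallel_tool_calls=True,
)
print(response.choices[0].message.tool_calls)
```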
required: - - name + - model + - messages - ChatCompletionTool: + CreateChatCompletionResponse: type: object + description: Represents a chat completion response returned by model, based on the provided input. properties: - type: + id: type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. - function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function - - FunctionObject: - type: object - properties: - description: + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: + description: The model used for the chat completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - parameters: - $ref: "#/components/schemas/FunctionParameters" - required: - - name - - ChatCompletionToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools are present. - oneOf: - - type: string - description: > - `none` means the model will not call any tool and instead generates a message. 
- `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - enum: [none, auto, required] - - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" - x-oaiExpandable: true - - ChatCompletionNamedToolChoice: - type: object - description: Specifies a tool the model should use. Use to force the model to call a specific function. - properties: - type: + enum: [ "scale", "default" ] + example: "scale" + nullable: true + system_fingerprint: type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [ chat.completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" required: - - type - - function - - ChatCompletionMessageToolCalls: - type: array - description: The tool calls generated by the model, such as function calls. - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCall" + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example - ChatCompletionMessageToolCall: + CreateChatCompletionFunctionResponse: type: object + description: Represents a chat completion response returned by model, based on the provided input. properties: - # TODO: index included when streaming id: type: string - description: The ID of the tool call. - type: + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: + [ "stop", "length", "function_call", "content_filter" ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - description: The function that the model called. - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function. - required: - - name - - arguments + description: The model used for the chat completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [ chat.completion ] + usage: + $ref: "#/components/schemas/CompletionUsage" required: + - choices + - created - id - - type - - function + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example - ChatCompletionMessageToolCallChunk: + ChatCompletionTokenLogprob: type: object properties: - index: - type: integer - id: - type: string - description: The ID of the tool call. - type: + token: &chat_completion_response_logprobs_token + description: The token. type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes required: - - index - - # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. - ChatCompletionRole: - type: string - description: The role of the author of a message - enum: - - system - - user - - assistant - - tool - - function + - token + - logprob + - bytes + - top_logprobs - ChatCompletionStreamOptions: - description: | - Options for streaming response. Only set this when you set `stream: true`. + ListPaginatedFineTuningJobsResponse: type: object - nullable: true - default: null properties: - include_usage: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: type: boolean - description: | - If set, an additional chunk will be streamed before the `data: [DONE]` message. 
The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + object: + type: string + enum: [ list ] + required: + - object + - data + - has_more - ChatCompletionResponseMessage: + CreateChatCompletionStreamResponse: type: object - description: A chat completion message generated by the model. + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. properties: - content: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: | + A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the + last chunk if you set `stream_options: {"include_usage": true}`. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: type: string - description: The contents of the message. + description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + type: string + enum: [ "scale", "default" ] + example: "scale" nullable: true - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - role: + system_fingerprint: type: string - enum: ["assistant"] - description: The role of the author of this message. - function_call: + description: | + This fingerprint represents the backend configuration that the model runs with. + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + enum: [ chat.completion.chunk ] + usage: type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + description: | + An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. + When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. + completion_tokens: + type: integer + description: Number of tokens in the generated completion. 
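(Illustrative aside, not part of the patch.) The stream-response `usage` object being defined here only appears when the request sets `stream_options: {"include_usage": true}`; every chunk then carries a `usage` field that is null except on the final chunk, whose `choices` array is empty. A small sketch of consuming that, assuming the official OpenAI Python SDK:

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
    stream_options={"include_usage": True},
)

for chunk in stream:
    if chunk.choices:                       # regular delta chunks
        print(chunk.choices[0].delta.content or "", end="")
    if chunk.usage is not None:             # only set on the final chunk
        print(f"\nprompt={chunk.usage.prompt_tokens} "
              f"completion={chunk.usage.completion_tokens} "
              f"total={chunk.usage.total_tokens}")
```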
+ prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). required: - - name - - arguments + - prompt_tokens + - completion_tokens + - total_tokens required: - - role - - content + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example - ChatCompletionStreamResponseDelta: + CreateChatCompletionImageResponse: type: object - description: A chat completion delta generated by streamed model responses. - properties: - content: - type: string - description: The contents of the chunk message. - nullable: true - function_call: - deprecated: true - type: object - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - tool_calls: - type: array - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" - role: - type: string - enum: ["system", "user", "assistant", "tool"] - description: The role of the author of this message. + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_image_example - CreateChatCompletionRequest: + CreateImageRequest: type: object properties: - messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + type: string + example: "A cute baby sea otter" model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-4-turbo" anyOf: - type: string - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] + enum: [ "dall-e-2", "dall-e-3" ] x-oaiTypeLabel: string - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_frequency_penalty_description - logit_bias: - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: | - Modify the likelihood of specified tokens appearing in the completion. 
- - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. - type: boolean - default: false - nullable: true - top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. - type: integer - minimum: 0 - maximum: 20 - nullable: true - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - type: integer + default: "dall-e-2" + example: "dall-e-3" nullable: true - n: + description: The model to use for image generation. + n: &images_n type: integer minimum: 1 - maximum: 128 + maximum: 10 default: 1 example: 1 nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 + description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + quality: + type: string + enum: [ "standard", "hd" ] + default: "standard" + example: "standard" + description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. + response_format: &images_response_format + type: string + enum: [ "url", "b64_json" ] + default: "url" + example: "url" nullable: true - description: *completions_presence_penalty_description - response_format: - type: object - description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + size: &images_size + type: string + enum: [ "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" ] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: + type: string + enum: [ "vivid", "natural" ] + default: "vivid" + example: "vivid" + nullable: true + description: The style of the generated images. Must be one of `vivid` or `natural`. 
Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration + required: + - prompt - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. - seed: + ImagesResponse: + properties: + created: type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - This feature is in Beta. - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - x-oaiMeta: - beta: true - stop: - description: | - Up to 4 sequences where the API will stop generating further tokens. - default: null - oneOf: + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: - type: string - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - stream: - description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
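(Illustrative aside, not part of the patch.) The `CreateImageRequest` schema added above corresponds to the image-generation endpoint; `quality` and `style` only apply to `dall-e-3`, and `dall-e-3` only supports `n=1`, as the descriptions note. A hedged sketch with the official OpenAI Python SDK:

```python
from openai import OpenAI

client = OpenAI()

result = client.images.generate(
    model="dall-e-3",
    prompt="A cute baby sea otter wearing a beret",
    size="1024x1024",         # dall-e-3 also accepts 1792x1024 / 1024x1792
    quality="hd",             # dall-e-3 only
    style="natural",          # "vivid" (default) or "natural"
    n=1,                      # dall-e-3 supports only n=1
    response_format="url",    # or "b64_json"; URLs expire after 60 minutes
)
print(result.data[0].url)
print(result.data[0].revised_prompt)
```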
- type: boolean + - type: string + enum: [ "dall-e-2" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" nullable: true - default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - temperature: - type: number - minimum: 0 - maximum: 2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: + type: integer + minimum: 1 + maximum: 10 default: 1 example: 1 nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: [ "256x256", "512x512", "1024x1024" ] + default: "1024x1024" + example: "1024x1024" nullable: true - description: *completions_top_p_description - tools: - type: array - description: > - A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. - items: - $ref: "#/components/schemas/ChatCompletionTool" - tool_choice: - $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format user: *end_user_param_configuration - function_call: - deprecated: true - description: | - Deprecated in favor of `tool_choice`. + required: + - prompt + - image - Controls which (if any) function is called by the model. - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: [ "dall-e-2" ] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration + required: + - image - `none` is the default when no functions are present. `auto` is the default if functions are present. + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify oneOf: - type: string - description: > - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - enum: [none, auto] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - x-oaiExpandable: true - functions: - deprecated: true + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: description: | - Deprecated in favor of `tools`. + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. 
If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: [ "text-moderation-latest", "text-moderation-stable" ] + x-oaiTypeLabel: string + required: + - input - A list of functions the model may generate JSON inputs for. + CreateModerationResponse: + type: object + description: Represents if a given text input is potentially harmful. + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: type: array - minItems: 1 - maxItems: 128 + description: A list of moderation objects. items: - $ref: "#/components/schemas/ChatCompletionFunctions" - + type: object + properties: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. 
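(Illustrative aside, not part of the patch.) The moderation request/response schemas above return a boolean flag per category plus a score per category. A short sketch of calling the endpoint, assuming the official OpenAI Python SDK; the input string is the example value used in the spec itself:

```python
from openai import OpenAI

client = OpenAI()

mod = client.moderations.create(
    model="text-moderation-latest",
    input="I want to kill them.",        # example input from the spec
)

result = mod.results[0]
print(result.flagged)                    # overall True/False
print(result.categories.violence)        # per-category boolean flags
print(result.category_scores.violence)   # per-category scores
```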
+ hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores required: + - id - model - - messages + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example - CreateChatCompletionResponse: + ListFilesResponse: type: object - description: Represents a chat completion response returned by model, based on the provided input. properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: + data: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: &chat_completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - logprobs: &chat_completion_response_logprobs - description: Log probability information for the choice. - type: object - nullable: true - properties: - content: - description: A list of message content tokens with log probability information. - type: array - items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true - required: - - content - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: + $ref: "#/components/schemas/OpenAIFile" + object: type: string - description: The model used for the chat completion. - system_fingerprint: + enum: [ list ] + required: + - object + - data + + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The File object (not file name) to be uploaded. type: string + format: binary + purpose: description: | - This fingerprint represents the backend configuration that the model runs with. 
+ The intended purpose of the uploaded file. + + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). + type: string + enum: [ "assistants", "batch", "fine-tune", "vision" ] + required: + - file + - purpose - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + DeleteFileResponse: + type: object + properties: + id: + type: string object: type: string - description: The object type, which is always `chat.completion`. - enum: [chat.completion] - usage: - $ref: "#/components/schemas/CompletionUsage" + enum: [ file ] + deleted: + type: boolean required: - - choices - - created - id - - model - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example + - deleted - CreateChatCompletionFunctionResponse: + CreateUploadRequest: type: object - description: Represents a chat completion response returned by model, based on the provided input. + additionalProperties: false properties: - id: + filename: + description: | + The name of the file to upload. type: string - description: A unique identifier for the chat completion. - choices: + purpose: + description: | + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + type: string + enum: [ "assistants", "batch", "fine-tune", "vision" ] + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: | + The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. + type: string + required: + - filename + - purpose + - bytes + - mime_type + + AddUploadPartRequest: + type: object + additionalProperties: false + properties: + data: + description: | + The chunk of bytes for this Part. + type: string + format: binary + required: + - data + + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + description: | + The ordered list of Part IDs. items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: - &chat_completion_function_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: ["stop", "length", "function_call", "content_filter"] - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. + type: string + md5: + description: | + The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. 
+ type: string + required: + - part_ids + + CancelUploadRequest: + type: object + additionalProperties: false + + CreateFineTuningJobRequest: + type: object + properties: model: + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + example: "gpt-4o-mini" + anyOf: + - type: string + - type: string + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - description: The model used for the chat completion. - system_fingerprint: + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: | + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [ auto ] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [ auto ] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [ auto ] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 64 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. type: string + minLength: 1 + maxLength: 64 + default: null + nullable: true + validation_file: description: | - This fingerprint represents the backend configuration that the model runs with. + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + nullable: true + example: "file-abc123" + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. + nullable: true + items: + type: object + required: + - type + - wandb + properties: + type: + description: | + The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. + oneOf: + - type: string + enum: [ wandb ] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. 
This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always `chat.completion`. - enum: [chat.completion] - usage: - $ref: "#/components/schemas/CompletionUsage" + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. + type: integer + nullable: true + minimum: 0 + maximum: 2147483647 + example: 42 required: - - choices - - created - - id - model - - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_function_example + - training_file - ChatCompletionTokenLogprob: + ListFineTuningJobEventsResponse: type: object properties: - token: &chat_completion_response_logprobs_token - description: The token. - type: string - logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - type: number - bytes: &chat_completion_response_logprobs_bytes - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - type: array - items: - type: integer - nullable: true - top_logprobs: - description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. 
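(Illustrative aside, not part of the patch.) The `CreateFineTuningJobRequest` schema completed above is what a job-creation call sends; the training file must have been uploaded with purpose `fine-tune`. A hedged sketch with the official OpenAI Python SDK — the file ID and suffix are placeholders:

```python
from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",           # uploaded with purpose "fine-tune"
    hyperparameters={"n_epochs": "auto"},  # or an integer between 1 and 50
    suffix="custom-model-name",
    seed=42,                               # optional, for reproducibility
)
print(job.id, job.status)
```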
+ data: type: array items: - type: object - properties: - token: *chat_completion_response_logprobs_token - logprob: *chat_completion_response_logprobs_token_logprob - bytes: *chat_completion_response_logprobs_bytes - required: - - token - - logprob - - bytes + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + enum: [ list ] required: - - token - - logprob - - bytes - - top_logprobs + - object + - data - ListPaginatedFineTuningJobsResponse: + ListFineTuningJobCheckpointsResponse: type: object properties: data: type: array items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean + $ref: "#/components/schemas/FineTuningJobCheckpoint" object: type: string - enum: [list] + enum: [ list ] + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean required: - object - data - has_more - CreateChatCompletionStreamResponse: + CreateEmbeddingRequest: type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + additionalProperties: false properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array + input: description: | - A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the - last chunk if you set `stream_options: {"include_usage": true}`. - items: - type: object - required: - - delta - - finish_reason - - index - properties: - delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - logprobs: *chat_completion_response_logprobs - finish_reason: + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: "This is a test." + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: type: string - description: *chat_completion_finish_reason_description - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - nullable: true - index: + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: type: integer - description: The index of the choice in the list of choices. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. 
+ minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: + [ + "text-embedding-ada-002", + "text-embedding-3-small", + "text-embedding-3-large", + ] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" type: string - description: The model to generate the completion. - system_fingerprint: - type: string + enum: [ "float", "base64" ] + dimensions: description: | - This fingerprint represents the backend configuration that the model runs with. - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. object: type: string - description: The object type, which is always `chat.completion.chunk`. - enum: [chat.completion.chunk] + description: The object type, which is always "list". + enum: [ list ] usage: type: object - description: | - An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. - When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. + description: The usage information for the request. properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. prompt_tokens: type: integer - description: Number of tokens in the prompt. + description: The number of tokens used by the prompt. total_tokens: type: integer - description: Total number of tokens used in the request (prompt + completion). + description: The total number of tokens used by the request. required: - prompt_tokens - - completion_tokens - total_tokens required: - - choices - - created - - id - - model - object - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example - - CreateChatCompletionImageResponse: - type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_image_example + - model + - data + - usage - CreateImageRequest: + CreateTranscriptionRequest: type: object + additionalProperties: false properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string - example: "A cute baby sea otter" + x-oaiTypeLabel: file + format: binary model: + description: | + ID of the model to use. 
Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 anyOf: - type: string - type: string - enum: ["dall-e-2", "dall-e-3"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-3" - nullable: true - description: The model to use for image generation. - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - quality: - type: string - enum: ["standard", "hd"] - default: "standard" - example: "standard" - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - response_format: &images_response_format + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. type: string - enum: ["url", "b64_json"] - default: "url" - example: "url" - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - size: &images_size + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string - enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - style: + response_format: + $ref: "#/components/schemas/AudioResponseFormat" + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + timestamp_granularities[]: + description: | + The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + type: array + items: + type: string + enum: + - word + - segment + default: [ segment ] + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponseJson: + type: object + description: Represents a transcription response returned by model, based on the provided input. + properties: + text: type: string - enum: ["vivid", "natural"] - default: "vivid" - example: "vivid" - nullable: true - description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. 
This param is only supported for `dall-e-3`. - user: *end_user_param_configuration + description: The transcribed text. required: - - prompt + - text + x-oaiMeta: + name: The transcription object (JSON) + group: audio + example: *basic_transcription_response_example - ImagesResponse: + TranscriptionSegment: + type: object properties: - created: + id: type: integer - data: + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: type: array items: - $ref: "#/components/schemas/Image" + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. required: - - created - - data + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob - Image: + TranscriptionWord: type: object - description: Represents the url or the content of an image generated by the OpenAI API. properties: - b64_json: - type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - url: - type: string - description: The URL of the generated image, if `response_format` is `url` (default). - revised_prompt: + word: type: string - description: The prompt that was used to generate the image, if there was any revision to the prompt. - x-oaiMeta: - name: The image object - example: | - { - "url": "...", - "revised_prompt": "..." - } + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. + required: [ word, start, end ] - CreateImageEditRequest: + CreateTranscriptionResponseVerboseJson: type: object + description: Represents a verbose json transcription response returned by model, based on the provided input. properties: - image: - description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. - type: string - format: binary - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + language: type: string - example: "A cute baby sea otter wearing a beret" - mask: - description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + description: The language of the input audio. 
+ duration: type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: ["dall-e-2"] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. - size: &dalle2_images_size + description: The duration of the input audio. + text: type: string - enum: ["256x256", "512x512", "1024x1024"] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - response_format: *images_response_format - user: *end_user_param_configuration - required: - - prompt - - image + description: The transcribed text. + words: + type: array + description: Extracted words and their corresponding timestamps. + items: + $ref: "#/components/schemas/TranscriptionWord" + segments: + type: array + description: Segments of the transcribed text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [ language, duration, text ] + x-oaiMeta: + name: The transcription object (Verbose JSON) + group: audio + example: *verbose_transcription_response_example - CreateImageVariationRequest: + AudioResponseFormat: + description: | + The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + + CreateTranslationRequest: type: object + additionalProperties: false properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + file: + description: | + The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string + x-oaiTypeLabel: file format: binary model: + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 anyOf: - type: string - type: string - enum: ["dall-e-2"] + enum: [ "whisper-1" ] x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: *images_n - response_format: *images_response_format - size: *dalle2_images_size - user: *end_user_param_configuration + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + type: string + response_format: + $ref: "#/components/schemas/AudioResponseFormat" + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + required: + - file + - model + + # Note: This does not currently support the non-default response format types. 
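
As a point of reference for the audio schemas above (`CreateTranscriptionRequest`, the verbose JSON transcription response, and `CreateTranslationRequest`), here is a minimal sketch of how an API call matching these shapes is typically made. It uses the official OpenAI Python SDK purely for illustration (this repository ships a Dart client); the file name and audio content are placeholders.

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# CreateTranscriptionRequest with the verbose JSON response format
with open("meeting.m4a", "rb") as audio:  # placeholder file
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio,
        response_format="verbose_json",              # AudioResponseFormat
        timestamp_granularities=["word", "segment"],
    )

print(transcript.text)         # CreateTranscriptionResponseVerboseJson.text
print(transcript.words[0])     # TranscriptionWord: word, start, end
print(transcript.segments[0])  # TranscriptionSegment: id, seek, start, end, ...

# CreateTranslationRequest: same file shape, output is always English
with open("meeting.m4a", "rb") as audio:
    translation = client.audio.translations.create(model="whisper-1", file=audio)
print(translation.text)
```
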
+ CreateTranslationResponseJson: + type: object + properties: + text: + type: string required: - - image + - text - CreateModerationRequest: + CreateTranslationResponseVerboseJson: type: object properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [ language, duration, text ] + + CreateSpeechRequest: + type: object + additionalProperties: false + properties: model: description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` anyOf: - type: string - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] + enum: [ "tts-1", "tts-1-hd" ] x-oaiTypeLabel: string + input: + type: string + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + type: string + enum: [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] + response_format: + description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." + default: "mp3" + type: string + enum: [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 required: + - model - input + - voice - CreateModerationResponse: - type: object - description: Represents if a given text input is potentially harmful. + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. properties: id: type: string - description: The unique identifier for the moderation request. - model: + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: type: string - description: The model used to generate the moderation results. - results: - type: array - description: A list of moderation objects. - items: - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - type: object - description: A list of the categories, and whether they are flagged or not. 
- properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. 
- required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores + description: The object type, which is always "model". + enum: [ model ] + owned_by: + type: string + description: The organization that owns the model. required: - id - - model - - results + - object + - created + - owned_by x-oaiMeta: - name: The moderation object - example: *moderation_example + name: The model object + example: *retrieve_model_response - ListFilesResponse: - type: object + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. properties: - data: - type: array - items: - $ref: "#/components/schemas/OpenAIFile" + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. object: type: string - enum: [list] + description: The object type, which is always `file`. + enum: [ "file" ] + purpose: + type: string + description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. + enum: + [ + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision", + ] + status: + type: string + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: [ "uploaded", "processed", "error" ] + status_details: + type: string + deprecated: true + description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. required: + - id - object - - data - - CreateFileRequest: + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + Upload: type: object - additionalProperties: false + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. properties: - file: - description: | - The File object (not file name) to be uploaded. + id: type: string - format: binary + description: The Upload unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. purpose: - description: | - The intended purpose of the uploaded file. - - Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - enum: ["assistants", "batch", "fine-tune"] + description: The intended purpose of the file. 
[Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. + status: + type: string + description: The status of the Upload. + enum: [ "pending", "completed", "cancelled", "expired" ] + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + object: + type: string + description: The object type, which is always "upload". + enum: [ upload ] + file: + $ref: "#/components/schemas/OpenAIFile" + nullable: true + description: The ready File object after the Upload is completed. required: - - file + - bytes + - created_at + - expires_at + - filename + - id - purpose - - DeleteFileResponse: + - status + - step_number + x-oaiMeta: + name: The upload object + example: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + UploadPart: type: object + title: UploadPart + description: | + The upload Part represents a chunk of bytes we can add to an Upload object. properties: id: type: string + description: The upload Part unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: + type: string + description: The ID of the Upload object that this Part was added to. object: type: string - enum: [file] - deleted: - type: boolean + description: The object type, which is always `upload.part`. + enum: [ 'upload.part' ] required: + - created_at - id - object - - deleted - - CreateFineTuningJobRequest: + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } + Embedding: type: object + description: | + Represents an embedding vector returned by embedding endpoint. properties: - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" - anyOf: - - type: string - - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] - x-oaiTypeLabel: string - training_file: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: + type: string + description: The object type, which is always "embedding". + enum: [ embedding ] + required: + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
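
The `Embedding` object above pairs with the `CreateEmbeddingRequest` and `CreateEmbeddingResponse` schemas defined earlier in this file. A minimal sketch of a matching call, again using the official OpenAI Python SDK only for illustration; the input strings and the `dimensions` value are arbitrary placeholders.

```python
from openai import OpenAI

client = OpenAI()

resp = client.embeddings.create(
    model="text-embedding-3-small",
    input=["The quick brown fox", "jumped over the lazy dog"],
    encoding_format="float",  # or "base64"
    dimensions=256,           # only supported on text-embedding-3 and later models
)

for item in resp.data:  # each item maps to the Embedding schema
    print(item.index, len(item.embedding))
print(resp.usage.prompt_tokens, resp.usage.total_tokens)
```
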
+ FineTuningJob: + type: object + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + properties: + id: type: string - example: "file-abc123" + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true + required: + - code + - message + - param + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. hyperparameters: type: object - description: The hyperparameters used for the fine-tuning job. + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - batch_size: - description: | - Number of examples in each batch. A larger batch size means that model parameters - are updated less frequently, but with lower variance. - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: | - Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - overfitting. - oneOf: - - type: string - enum: [auto] - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one full cycle - through the training dataset. oneOf: - type: string - enum: [auto] + enum: [ auto ] - type: integer minimum: 1 maximum: 50 default: auto - suffix: - description: | - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs + model: type: string - minLength: 1 - maxLength: 40 - default: null - nullable: true - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. 
- - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + description: The base model that is being fine-tuned. + object: type: string - nullable: true - example: "file-abc123" - integrations: + description: The object type, which is always "fine_tuning.job". + enum: [ fine_tuning.job ] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: type: array - description: A list of integrations to enable for your fine-tuning job. - nullable: true - items: - type: object - required: - - type - - wandb - properties: - type: - description: | - The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. - oneOf: - - type: string - enum: [wandb] - wandb: - type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project - properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). + integrations: + type: array + nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 + items: + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true seed: - description: | - The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. 
- If a seed is not specified, one will be generated for you. + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: type: integer nullable: true - minimum: 0 - maximum: 2147483647 - example: 42 + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id - model + - object + - organization_id + - result_files + - status + - trained_tokens - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example - ListFineTuningJobEventsResponse: + FineTuningIntegration: type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobEvent" - object: + type: type: string - enum: [list] - required: - - object - - data + description: "The type of the integration being enabled for the fine-tuning job" + enum: [ "wandb" ] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" - ListFineTuningJobCheckpointsResponse: + FineTuningJobEvent: type: object + description: Fine-tuning job event object properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobCheckpoint" - object: + id: type: string - enum: [list] - first_id: + created_at: + type: integer + level: type: string - nullable: true - last_id: + enum: [ "info", "warn", "error" ] + message: type: string - nullable: true - has_more: - type: boolean + object: + type: string + enum: [ fine_tuning.job.event ] required: + - id - object - - data - - has_more + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } - CreateEmbeddingRequest: + FineTuningJobCheckpoint: type: object - additionalProperties: false + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. properties: - input: - description: | - Input text to embed, encoded as a string or array of tokens. 
To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - example: "The quick brown fox jumped over the lazy dog" - oneOf: - - type: string - title: string - description: The string that will be turned into an embedding. - default: "" - example: "This is a test." - - type: array - title: array - description: The array of strings that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: string - default: "" - example: "['This is a test.']" - - type: array - title: array - description: The array of integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - title: array - description: The array of arrays containing integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - x-oaiExpandable: true - model: - description: *model_description - example: "text-embedding-3-small" - anyOf: - - type: string - - type: string - enum: - [ - "text-embedding-ada-002", - "text-embedding-3-small", - "text-embedding-3-large", - ] - x-oaiTypeLabel: string - encoding_format: - description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." - example: "float" - default: "float" + id: type: string - enum: ["float", "base64"] - dimensions: - description: | - The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: type: integer - minimum: 1 - user: *end_user_param_configuration - required: - - model - - input - - CreateEmbeddingResponse: - type: object - properties: - data: - type: array - description: The list of embeddings generated by the model. - items: - $ref: "#/components/schemas/Embedding" - model: + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. + properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: type: string - description: The name of the model used to generate the embedding. + description: The name of the fine-tuning job that this checkpoint was created from. object: type: string - description: The object type, which is always "list". - enum: [list] - usage: - type: object - description: The usage information for the request. - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. 
- total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens + description: The object type, which is always "fine_tuning.job.checkpoint". + enum: [ fine_tuning.job.checkpoint ] required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics - object - - model - - data - - usage + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: | + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } - CreateTranscriptionRequest: + FinetuneChatRequestInput: type: object - additionalProperties: false + description: The per-line training example of a fine-tuning input file for chat models properties: - file: - description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - language: - description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - type: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 - timestamp_granularities[]: - description: | - The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. 
+ messages: type: array + minItems: 1 items: - type: string - enum: - - word - - segment - default: [segment] - required: - - file - - model + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: + type: array + description: A list of tools the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + x-oaiMeta: + name: Training format for chat models + example: | + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" }, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] + } + } + } + ] + } - # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponseJson: + FinetuneCompletionRequestInput: type: object - description: Represents a transcription response returned by model, based on the provided input. + description: The per-line training example of a fine-tuning input file for completions models properties: - text: + prompt: type: string - description: The transcribed text. - required: - - text + description: The input prompt for this training example. + completion: + type: string + description: The desired completion for this training example. x-oaiMeta: - name: The transcription object - group: audio - example: *basic_transcription_response_example + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } - TranscriptionSegment: + CompletionUsage: type: object + description: Usage statistics for the completion request. properties: - id: + completion_tokens: type: integer - description: Unique identifier of the segment. - seek: + description: Number of tokens in the generated completion. + prompt_tokens: type: integer - description: Seek offset of the segment. - start: - type: number - format: float - description: Start time of the segment in seconds. - end: - type: number - format: float - description: End time of the segment in seconds. - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - format: float - description: Temperature parameter used for generating the segment. 
- avg_logprob: - type: number - format: float - description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. - compression_ratio: - type: number - format: float - description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. - no_speech_prob: - type: number - format: float - description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + description: Breakdown of tokens used in a completion. + properties: + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob + - prompt_tokens + - completion_tokens + - total_tokens - TranscriptionWord: + RunCompletionUsage: type: object + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). properties: - word: - type: string - description: The text content of the word. - start: - type: number - format: float - description: Start time of the word in seconds. - end: - type: number - format: float - description: End time of the word in seconds. - required: [word, start, end] + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + RunStepCompletionUsage: + type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [ auto ] + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + x-oaiExpandable: true - CreateTranscriptionResponseVerboseJson: + AssistantObject: type: object - description: Represents a verbose json transcription response returned by model, based on the provided input. + title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: - language: + id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The language of the input audio. - duration: + object: + description: The object type, which is always `assistant`. type: string - description: The duration of the input audio. - text: + enum: [ assistant ] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. + type: integer + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. type: string - description: The transcribed text. - words: - type: array - description: Extracted words and their corresponding timestamps. - items: - $ref: "#/components/schemas/TranscriptionWord" - segments: + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. + type: string + maxLength: 512 + nullable: true + model: + description: *model_description + type: string + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 256,000 characters. + type: string + maxLength: 256000 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + default: [ ] type: array - description: Segments of the transcribed text and their corresponding details. + maxItems: 128 items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. 
There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata x-oaiMeta: - name: The transcription object - group: audio - example: *verbose_transcription_response_example + name: The assistant object + beta: true + example: *create_assistants_example - CreateTranslationRequest: + CreateAssistantRequest: type: object additionalProperties: false properties: - file: - description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - example: whisper-1 + description: *model_description + example: "gpt-4o" anyOf: - type: string - type: string - enum: ["whisper-1"] + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + name: + description: *assistant_name_param_description type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. 
+ nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - default: json - temperature: + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description + type: string + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [ ] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. 
+ + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [ vector_store_ids ] + - required: [ vector_stores ] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description type: number - default: 0 + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - file - model - # Note: This does not currently support the non-default response format types. - CreateTranslationResponseJson: - type: object - properties: - text: - type: string - required: - - text - - CreateTranslationResponseVerboseJson: - type: object - properties: - language: - type: string - description: The language of the output translation (always `english`). - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The translated text. - segments: - type: array - description: Segments of the translated text and their corresponding details. - items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] - - CreateSpeechRequest: + ModifyAssistantRequest: type: object additionalProperties: false properties: model: - description: | - One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + description: *model_description anyOf: - type: string - - type: string - enum: ["tts-1", "tts-1-hd"] - x-oaiTypeLabel: string - input: - type: string - description: The text to generate audio for. The maximum length is 4096 characters. - maxLength: 4096 - voice: - description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). - type: string - enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] - response_format: - description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." - default: "mp3" - type: string - enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] - speed: - description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." - type: number - default: 1.0 - minimum: 0.25 - maximum: 4.0 - required: - - model - - input - - voice - - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. - properties: - id: - type: string - description: The model identifier, which can be referenced in the API endpoints. - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: - type: string - description: The object type, which is always "model". 
- enum: [model] - owned_by: - type: string - description: The organization that owns the model. - required: - - id - - object - - created - - owned_by - x-oaiMeta: - name: The model object - example: *retrieve_model_response - - OpenAIFile: - title: OpenAIFile - description: The `File` object represents a document that has been uploaded to OpenAI. - properties: - id: - type: string - description: The file identifier, which can be referenced in the API endpoints. - bytes: - type: integer - description: The size of the file, in bytes. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - type: string - description: The object type, which is always `file`. - enum: ["file"] - purpose: + name: + description: *assistant_name_param_description type: string - description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. - enum: - [ - "assistants", - "assistants_output", - "batch", - "batch_output", - "fine-tune", - "fine-tune-results", - "vision" - ] - status: + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - deprecated: true - description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: ["uploaded", "processed", "error"] - status_details: + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - deprecated: true - description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. - required: - - id - - object - - bytes - - created_at - - filename - - purpose - - status - x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "salesOverview.pdf", - "purpose": "assistants", - } - Embedding: - type: object - description: | - Represents an embedding vector returned by embedding endpoint. - properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. - embedding: + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [ ] type: array - description: | - The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + maxItems: 128 items: - type: number - object: - type: string - description: The object type, which is always "embedding". - enum: [embedding] - required: - - index - - object - - embedding - x-oaiMeta: - name: The embedding object - example: | - { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... (1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 - } - - FineTuningJob: - type: object - title: FineTuningJob - description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. - properties: - id: - type: string - description: The object identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. 
- error: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: - type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. - nullable: true - required: - - code - - message - - param - fine_tuned_model: - type: string + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - finished_at: - type: integer + temperature: + description: *run_temperature_description + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - properties: - n_epochs: - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. - required: - - n_epochs - model: + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + + DeleteAssistantResponse: + type: object + properties: + id: type: string - description: The base model that is being fine-tuned. + deleted: + type: boolean object: type: string - description: The object type, which is always "fine_tuning.job". 
- enum: [fine_tuning.job] - organization_id: + enum: [ assistant.deleted ] + required: + - id + - object + - deleted + + ListAssistantsResponse: + type: object + properties: + object: type: string - description: The organization that owns the fine-tuning job. - result_files: + example: "list" + data: type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). items: - type: string - example: file-abc123 - status: - type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - enum: - [ - "validating_files", - "queued", - "running", - "succeeded", - "failed", - "cancelled", - ] - trained_tokens: - type: integer - nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - training_file: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: + example: "asst_abc123" + last_id: type: string - nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). - integrations: - type: array - nullable: true - description: A list of integrations to enable for this fine-tuning job. - maxItems: 5 - items: - oneOf: - - $ref: "#/components/schemas/FineTuningIntegration" - x-oaiExpandable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed + - data + - first_id + - last_id + - has_more x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example + name: List assistants response object + group: chat + example: *list_assistants_example - FineTuningIntegration: + AssistantToolsCode: type: object - title: Fine-Tuning Job Integration + title: Code interpreter tool + properties: + type: + type: string + description: "The type of tool being defined: `code_interpreter`" + enum: [ "code_interpreter" ] required: - type - - wandb + + AssistantToolsFileSearch: + type: object + title: FileSearch tool properties: type: type: string - description: "The type of the integration being enabled for the fine-tuning job" - enum: ["wandb"] - wandb: + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + file_search: type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project + description: Overrides for the file search tool. 
properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: + max_num_results: + type: integer + minimum: 1 + maximum: 50 description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" + required: + - type - FineTuningJobEvent: + FileSearchRankingOptions: + title: File search tool call ranking options type: object - description: Fine-tuning job event object + description: | + The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. + + See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information. properties: - id: + ranker: type: string - created_at: - type: integer - level: + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + enum: [ "auto", "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - score_threshold + + AssistantToolsFileSearchTypeOnly: + type: object + title: FileSearch tool + properties: + type: type: string - enum: ["info", "warn", "error"] - message: + description: "The type of tool being defined: `file_search`" + enum: [ "file_search" ] + required: + - type + + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: type: string - object: + description: "The type of tool being defined: `function`" + enum: [ "function" ] + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + + TruncationObject: + type: object + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + properties: + type: + type: string + description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. 
+ enum: [ "auto", "last_messages" ] + last_messages: + type: integer + description: The number of most recent messages from the thread when constructing the context for the run. + minimum: 1 + nullable: true + required: + - type + + AssistantsApiToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tools and instead generates a message. + `auto` is the default value and means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + enum: [ none, auto, required ] + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + + AssistantsNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific tool. + properties: + type: type: string - enum: [fine_tuning.job.event] + enum: [ "function", "code_interpreter", "file_search" ] + description: The type of the tool. If type is `function`, the function name must be set + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name required: - - id - - object - - created_at - - level - - message - x-oaiMeta: - name: The fine-tuning job event object - example: | - { - "object": "fine_tuning.job.event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" - } + - type - FineTuningJobCheckpoint: + RunObject: type: object - title: FineTuningJobCheckpoint - description: | - The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The checkpoint identifier, which can be referenced in the API endpoints. + object: + description: The object type, which is always `thread.run`. + type: string + enum: [ "thread.run" ] created_at: + description: The Unix timestamp (in seconds) for when the run was created. type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + type: string + status: + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. 
+ type: string + enum: + [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "incomplete", + "expired", + ] + required_action: + type: object + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: [ "submit_tool_outputs" ] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. type: integer - description: The step number that the checkpoint was created at. - metrics: + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. + type: integer + nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. type: object - description: Metrics at the step number during the fine-tuning job. + nullable: true properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - fine_tuning_job_id: + reason: + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + type: string + enum: [ "max_completion_tokens", "max_prompt_tokens" ] + model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The object type, which is always "fine_tuning.job.checkpoint". - enum: [fine_tuning.job.checkpoint] + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. 
+ default: [ ] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens specified to have been used over the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens specified to have been used over the course of the run. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - id - - metrics - object - - step_number + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format x-oaiMeta: - name: The fine-tuning job checkpoint object + name: The run object + beta: true example: | { - "object": "fine_tuning.job.checkpoint", - "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", - "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", - "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", - "metrics": { - "step": 88, - "train_loss": 0.478, - "train_mean_token_accuracy": 0.924, - "valid_loss": 10.112, - "valid_mean_token_accuracy": 0.145, - "full_valid_loss": 0.567, - "full_valid_mean_token_accuracy": 0.944 + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null }, - "step_number": 88 + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } - - CompletionUsage: + CreateRunRequest: type: object - description: Usage statistics for the completion request. 
+ additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: type: integer - description: Number of tokens in the prompt. - total_tokens: + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: type: integer - description: Total number of tokens used in the request (prompt + completion). 
+ nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - prompt_tokens - - completion_tokens - - total_tokens - - RunCompletionUsage: + - thread_id + - assistant_id + ListRunsResponse: type: object - description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - RunStepCompletionUsage: + - object + - data + - first_id + - last_id + - has_more + ModifyRunRequest: type: object - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - AssistantsApiResponseFormatOption: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
- oneOf: - - type: string - description: > - `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsApiResponseFormat" - x-oaiExpandable: true - - AssistantsApiResponseFormat: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. + additionalProperties: false properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + required: + - tool_outputs - AssistantObject: + RunToolCallObject: type: object - title: Assistant - description: Represents an `assistant` that can call the model and use tools. + description: Tool call objects properties: id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `assistant`. type: string - enum: [assistant] - created_at: - description: The Unix timestamp (in seconds) for when the assistant was created. - type: integer - name: - description: &assistant_name_param_description | - The name of the assistant. The maximum length is 256 characters. + description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: type: string - maxLength: 256 - nullable: true - description: - description: &assistant_description_param_description | - The description of the assistant. The maximum length is 512 characters. + description: The type of tool call the output is required for. For now, this is always `function`. + enum: [ "function" ] + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function + + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string - maxLength: 512 - nullable: true + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. model: - description: *model_description - type: string + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. 
If not, the model associated with the assistant will be used. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true instructions: - description: &assistant_instructions_param_description | - The system instructions that the assistant uses. The maximum length is 256,000 characters. + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. type: string - maxLength: 256000 nullable: true tools: - description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. - default: [] + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - maxItems: 128 + maxItems: 20 items: oneOf: - $ref: "#/components/schemas/AssistantToolsCode" - $ref: "#/components/schemas/AssistantToolsFileSearch" - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true tool_resources: type: object description: | @@ -9730,8 +12893,8 @@ components: file_ids: type: array description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [] + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] maxItems: 20 items: type: string @@ -9747,20 +12910,18 @@ components: type: string nullable: true metadata: - description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + description: *metadata_description type: object x-oaiTypeLabel: map nullable: true temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. type: number minimum: 0 maximum: 2 default: 1 example: 1 nullable: true + description: *run_temperature_description top_p: type: number minimum: 0 @@ -9768,92 +12929,116 @@ components: default: 1 example: 1 nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
+ description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" nullable: true + required: + - thread_id + - assistant_id + + ThreadObject: + type: object + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread`. + type: string + enum: [ "thread" ] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. 
+ maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - id - object - created_at - - name - - description - - model - - instructions - - tools + - tool_resources - metadata x-oaiMeta: - name: The assistant object + name: The thread object beta: true - example: *create_assistants_example + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } - CreateAssistantRequest: + CreateThreadRequest: type: object additionalProperties: false properties: - model: - description: *model_description - example: "gpt-4-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - name: - description: *assistant_name_param_description - type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description - type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description - type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [] + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. type: array - maxItems: 128 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true + $ref: "#/components/schemas/CreateMessageRequest" tool_resources: type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: code_interpreter: type: object @@ -9862,7 +13047,7 @@ components: type: array description: | A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + default: [ ] maxItems: 20 items: type: string @@ -9872,14 +13057,14 @@ components: vector_store_ids: type: array description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: string vector_stores: type: array description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
+ A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: object @@ -9891,84 +13076,76 @@ components: maxItems: 10000 items: type: string - metadata: + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - oneOf: - - required: [vector_store_ids] - - required: [vector_stores] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: [ "auto" ] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: [ "static" ] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + x-oaiExpandable: true + oneOf: + - required: [ vector_store_ids ] + - required: [ vector_stores ] nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
- response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - required: - - model - ModifyAssistantRequest: + ModifyThreadRequest: type: object additionalProperties: false properties: - model: - description: *model_description - anyOf: - - type: string - name: - description: *assistant_name_param_description - type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description - type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description - type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [] - type: array - maxItems: 128 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true tool_resources: type: object description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: code_interpreter: type: object @@ -9976,8 +13153,8 @@ components: file_ids: type: array description: | - Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [ ] maxItems: 20 items: type: string @@ -9987,7 +13164,7 @@ components: vector_store_ids: type: array description: | - Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. maxItems: 1 items: type: string @@ -9997,30 +13174,8 @@ components: type: object x-oaiTypeLabel: map nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
- response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - DeleteAssistantResponse: + DeleteThreadResponse: type: object properties: id: @@ -10029,14 +13184,13 @@ components: type: boolean object: type: string - enum: [assistant.deleted] + enum: [ thread.deleted ] required: - id - object - deleted - ListAssistantsResponse: - type: object + ListThreadsResponse: properties: object: type: string @@ -10044,7 +13198,7 @@ components: data: type: array items: - $ref: "#/components/schemas/AssistantObject" + $ref: "#/components/schemas/ThreadObject" first_id: type: string example: "asst_abc123" @@ -10060,438 +13214,276 @@ components: - first_id - last_id - has_more - x-oaiMeta: - name: List assistants response object - group: chat - example: *list_assistants_example - - AssistantToolsCode: - type: object - title: Code interpreter tool - properties: - type: - type: string - description: "The type of tool being defined: `code_interpreter`" - enum: ["code_interpreter"] - required: - - type - - AssistantToolsFileSearch: - type: object - title: FileSearch tool - properties: - type: - type: string - description: "The type of tool being defined: `file_search`" - enum: ["file_search"] - required: - - type - - AssistantToolsFunction: - type: object - title: Function tool - properties: - type: - type: string - description: "The type of tool being defined: `function`" - enum: ["function"] - function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function - - TruncationObject: - type: object - title: Thread Truncation Controls - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - properties: - type: - type: string - description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] - last_messages: - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - minimum: 1 - nullable: true - required: - - type - - AssistantsApiToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tools and instead generates a message. - `auto` is the default value and means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - - oneOf: - - type: string - description: > - `none` means the model will not call any tools and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] - - $ref: "#/components/schemas/AssistantsNamedToolChoice" - x-oaiExpandable: true - - AssistantsNamedToolChoice: - type: object - description: Specifies a tool the model should use. Use to force the model to call a specific tool. 
- properties: - type: - type: string - enum: ["function", "code_interpreter", "file_search"] - description: The type of the tool. If type is `function`, the function name must be set - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name - required: - - type - RunObject: + MessageObject: type: object - title: A run on a thread - description: Represents an execution run on a [thread](/docs/api-reference/threads). + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: id: description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run`. + description: The object type, which is always `thread.message`. type: string - enum: ["thread.run"] + enum: [ "thread.message" ] created_at: - description: The Unix timestamp (in seconds) for when the run was created. + description: The Unix timestamp (in seconds) for when the message was created. type: integer thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. - type: string - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. type: string status: - description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. - type: string - enum: - [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "incomplete", - "expired", - ] - required_action: - type: object - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - properties: - type: - description: For now, this is always `submit_tool_outputs`. - type: string - enum: ["submit_tool_outputs"] - submit_tool_outputs: - type: object - description: Details on the tool outputs needed for this run to continue. - properties: - tool_calls: - type: array - description: A list of the relevant tool calls. - items: - $ref: "#/components/schemas/RunToolCallObject" - required: - - tool_calls - required: - - type - - submit_tool_outputs - last_error: - type: object - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - properties: - code: - type: string - description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - expires_at: - description: The Unix timestamp (in seconds) for when the run will expire. - type: integer - nullable: true - started_at: - description: The Unix timestamp (in seconds) for when the run was started. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run failed. - type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run was completed. 
- type: integer - nullable: true + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + type: string + enum: [ "in_progress", "incomplete", "completed" ] incomplete_details: - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. + description: On an incomplete message, details about why the message is incomplete. type: object - nullable: true properties: reason: - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] - model: - description: The model that the [assistant](/docs/api-reference/assistants) used for this run. - type: string - instructions: - description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + description: The reason the message is incomplete. + enum: + [ + "content_filter", + "max_tokens", + "run_cancelled", + "run_expired", + "run_failed", + ] + nullable: true + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. + type: integer + nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + type: integer + nullable: true + role: + description: The entity that produced the message. One of `user` or `assistant`. type: string - tools: - description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [] + enum: [ "user", "assistant" ] + content: + description: The content of the message in array of text and/or images. type: array - maxItems: 20 items: oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string + nullable: true + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + type: string + nullable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were added to. + nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - usage: - $ref: "#/components/schemas/RunCompletionUsage" - temperature: - description: The sampling temperature used for this run. If not set, defaults to 1. - type: number - nullable: true - top_p: - description: The nucleus sampling value used for this run. If not set, defaults to 1. 
- type: number - nullable: true - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens specified to have been used over the course of the run. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens specified to have been used over the course of the run. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true required: - id - object - created_at - thread_id - - assistant_id - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at + - incomplete_details - completed_at - - model - - instructions - - tools + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - response_format x-oaiMeta: - name: The run object + name: The message object beta: true example: | { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1698107661, - "assistant_id": "asst_abc123", + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, "thread_id": "thread_abc123", - "status": "completed", - "started_at": 1699073476, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699073498, - "last_error": null, - "model": "gpt-4-turbo", - "instructions": null, - "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], - "metadata": {}, - "incomplete_details": null, - "usage": { - "prompt_tokens": 123, - "completion_tokens": 456, - "total_tokens": 579 - }, - "temperature": 1.0, - "top_p": 1.0, - "max_prompt_tokens": 1000, - "max_completion_tokens": 1000, - "truncation_strategy": { - "type": "auto", - "last_messages": null - }, - "response_format": "auto", - "tool_choice": "auto" + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} } - CreateRunRequest: + + MessageDeltaObject: type: object - additionalProperties: false + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + id: + description: The identifier of the message, which can be referenced in API endpoints. type: string - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
- example: "gpt-4-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + object: + description: The object type, which is always `thread.message.delta`. type: string - nullable: true - additional_instructions: - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + enum: [ "thread.message.delta" ] + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: [ "user", "assistant" ] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: type: string - nullable: true - additional_messages: - description: Adds additional messages to the thread before creating the run. + enum: [ "user", "assistant" ] + description: | + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. + content: + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). 
+ title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 + x-oaiExpandable: true + attachments: type: array items: - $ref: "#/components/schemas/CreateMessageRequest" - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should be added to. + required: + - file_id + - tools nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 + + ModifyMessageRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - We generally recommend altering this or temperature but not both. - stream: + DeleteMessageResponse: + type: object + properties: + id: + type: string + deleted: type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
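As an illustration of the `CreateMessageRequest` schema above, the following sketch sends a user message with an array of content parts and one attachment routed to the `file_search` tool; all IDs are placeholders.

```json
{
  "role": "user",
  "content": [
    { "type": "text", "text": "What does this diagram show?" },
    { "type": "image_file", "image_file": { "file_id": "file-abc123", "detail": "low" } }
  ],
  "attachments": [
    { "file_id": "file-def456", "tools": [ { "type": "file_search" } ] }
  ]
}
```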
- minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + object: + type: string + enum: [ thread.message.deleted ] required: - - thread_id - - assistant_id - ListRunsResponse: - type: object + - id + - object + - deleted + + ListMessagesResponse: properties: object: type: string @@ -10499,13 +13491,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageObject" first_id: type: string - example: "run_abc123" + example: "msg_abc123" last_id: type: string - example: "run_abc456" + example: "msg_abc123" has_more: type: boolean example: false @@ -10515,674 +13507,505 @@ components: - first_id - last_id - has_more - ModifyRunRequest: + + MessageContentImageFileObject: + title: Image file type: object - additionalProperties: false + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - metadata: - description: *metadata_description + type: + description: Always `image_file`. + type: string + enum: [ "image_file" ] + image_file: type: object - x-oaiTypeLabel: map - nullable: true - SubmitToolOutputsRunRequest: + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - file_id + required: + - type + - image_file + + MessageDeltaContentImageFileObject: + title: Image file type: object - additionalProperties: false + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - tool_outputs: - description: A list of tools for which the outputs are being submitted. - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_file`. + type: string + enum: [ "image_file" ] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
+ enum: [ "auto", "low", "high" ] + default: "auto" required: - - tool_outputs + - index + - type + + MessageContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + type: + type: string + enum: [ "image_url" ] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - url + required: + - type + - image_url + + MessageDeltaContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_url`. + type: string + enum: [ "image_url" ] + image_url: + type: object + properties: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + type: string + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: [ "auto", "low", "high" ] + default: "auto" + required: + - index + - type - RunToolCallObject: + MessageContentTextObject: + title: Text type: object - description: Tool call objects + description: The text content that is part of a message. properties: - id: - type: string - description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. type: + description: Always `text`. type: string - description: The type of tool call the output is required for. For now, this is always `function`. - enum: ["function"] - function: + enum: [ "text" ] + text: type: object - description: The function definition. properties: - name: - type: string - description: The name of the function. - arguments: + value: + description: The data that makes up the text. type: string - description: The arguments that the model expects you to pass to the function. + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true required: - - name - - arguments + - value + - annotations required: - - id - type - - function + - text - CreateThreadAndRunRequest: + MessageContentRefusalObject: + title: Refusal type: object - additionalProperties: false + description: The refusal content generated by the assistant. properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: + description: Always `refusal`. type: string - thread: - $ref: "#/components/schemas/CreateThreadRequest" - description: If no thread is provided, an empty thread will be created. - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
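For reference, a single `image_url` content part conforming to `MessageContentImageUrlObject` above could look like this; the URL is a placeholder, and `detail` is optional with a default of `auto`.

```json
{
  "type": "image_url",
  "image_url": {
    "url": "https://example.com/diagram.png",
    "detail": "high"
  }
}
```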
- example: "gpt-4-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + enum: [ "refusal" ] + refusal: type: string - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
- minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + nullable: false required: - - thread_id - - assistant_id + - type + - refusal - ThreadObject: + MessageRequestContentTextObject: + title: Text type: object - title: Thread - description: Represents a thread that contains [messages](/docs/api-reference/messages). + description: The text content that is part of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. + type: + description: Always `text`. type: string - object: - description: The object type, which is always `thread`. + enum: [ "text" ] + text: type: string - enum: ["thread"] - created_at: - description: The Unix timestamp (in seconds) for when the thread was created. - type: integer - tool_resources: + description: Text content to be sent to the model + required: + - type + - text + + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. + type: string + enum: [ "file_citation" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + file_id: + description: The ID of the specific File the citation is from. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: - - id - - object - - created_at - - tool_resources - - metadata - x-oaiMeta: - name: The thread object - beta: true - example: | - { - "id": "thread_abc123", - "object": "thread", - "created_at": 1698107661, - "metadata": {} - } + - type + - text + - file_citation + - start_index + - end_index - CreateThreadRequest: + MessageContentTextAnnotationsFilePathObject: + title: File path type: object - additionalProperties: false + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. properties: - messages: - description: A list of [messages](/docs/api-reference/messages) to start the thread with. 
- type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - tool_resources: + type: + description: Always `file_path`. + type: string + enum: [ "file_path" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - oneOf: - - required: [vector_store_ids] - - required: [vector_stores] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + file_id: + description: The ID of the file that was generated. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index - ModifyThreadRequest: + MessageDeltaContentTextObject: + title: Text type: object - additionalProperties: false + description: The text content that is part of a message. properties: - tool_resources: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. + type: string + enum: [ "text" ] + text: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
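Putting `MessageContentTextObject` and the `file_citation` annotation together, a text content part with one citation might look like the sketch below; the file ID and character indices are illustrative placeholders.

```json
{
  "type": "text",
  "text": {
    "value": "According to the attached report, revenue grew 12% [1].",
    "annotations": [
      {
        "type": "file_citation",
        "text": "[1]",
        "file_citation": { "file_id": "file-abc123" },
        "start_index": 51,
        "end_index": 54
      }
    ]
  }
}
```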
- default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type - DeleteThreadResponse: + MessageDeltaContentRefusalObject: + title: Refusal type: object + description: The refusal content that is part of a message. properties: - id: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. type: string - deleted: - type: boolean - object: + enum: [ "refusal" ] + refusal: type: string - enum: [thread.deleted] required: - - id - - object - - deleted + - index + - type - ListThreadsResponse: + + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - object: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/ThreadObject" - first_id: + enum: [ "file_citation" ] + text: + description: The text in the message content that needs to be replaced. type: string - example: "asst_abc123" - last_id: + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. type: string - example: "asst_abc456" - has_more: - type: boolean - example: false + enum: [ "file_path" ] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: - - object - - data - - first_id - - last_id - - has_more + - index + - type - MessageObject: + RunStepObject: type: object - title: The message object - description: Represents a message within a [thread](/docs/api-reference/threads). + title: Run steps + description: | + Represents a step in execution of a run. 
properties: id: - description: The identifier, which can be referenced in API endpoints. + description: The identifier of the run step, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.message`. + description: The object type, which is always `thread.run.step`. type: string - enum: ["thread.message"] + enum: [ "thread.run.step" ] created_at: - description: The Unix timestamp (in seconds) for when the message was created. + description: The Unix timestamp (in seconds) for when the run step was created. type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + type: string thread_id: - description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + description: The ID of the [thread](/docs/api-reference/threads) that was run. type: string - status: - description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. type: string - enum: ["in_progress", "incomplete", "completed"] - incomplete_details: - description: On an incomplete message, details about why the message is incomplete. + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. + type: string + enum: [ "message_creation", "tool_calls" ] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: [ "in_progress", "cancelled", "failed", "completed", "expired" ] + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: type: object + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true properties: - reason: + code: type: string - description: The reason the message is incomplete. - enum: - [ - "content_filter", - "max_tokens", - "run_cancelled", - "run_expired", - "run_failed", - ] - nullable: true + description: One of `server_error` or `rate_limit_exceeded`. + enum: [ "server_error", "rate_limit_exceeded" ] + message: + type: string + description: A human-readable description of the error. required: - - reason - completed_at: - description: The Unix timestamp (in seconds) for when the message was completed. + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. type: integer nullable: true - incomplete_at: - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. type: integer nullable: true - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: ["user", "assistant"] - content: - description: The content of the message in array of text and/or images. 
- type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageContentTextObject" - x-oaiExpandable: true - assistant_id: - description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. - type: string - nullable: true - run_id: - description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. - type: string + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they were added to. + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" required: - id - object - created_at + - assistant_id - thread_id + - run_id + - type - status - - incomplete_details + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - metadata + - usage x-oaiMeta: - name: The message object + name: The run step object beta: true - example: | - { - "id": "msg_abc123", - "object": "thread.message", - "created_at": 1698983503, - "thread_id": "thread_abc123", - "role": "assistant", - "content": [ - { - "type": "text", - "text": { - "value": "Hi! How can I help you today?", - "annotations": [] - } - } - ], - "assistant_id": "asst_abc123", - "run_id": "run_abc123", - "attachments": [], - "metadata": {} - } + example: *run_step_object_example - MessageDeltaObject: + RunStepDeltaObject: type: object - title: Message delta object + title: Run step delta object description: | - Represents a message delta i.e. any changed fields on a message during streaming. + Represents a run step delta i.e. any changed fields on a run step during streaming. properties: id: - description: The identifier of the message, which can be referenced in API endpoints. + description: The identifier of the run step, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.message.delta`. + description: The object type, which is always `thread.run.step.delta`. type: string - enum: ["thread.message.delta"] + enum: [ "thread.run.step.delta" ] delta: - description: The delta containing the fields that have changed on the Message. + description: The delta containing the fields that have changed on the run step. type: object properties: - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: ["user", "assistant"] - content: - description: The content of the message in array of text and/or images. 
- type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - - $ref: "#/components/schemas/MessageDeltaContentTextObject" - - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" - x-oaiExpandable: true + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true required: - id - object - delta x-oaiMeta: - name: The message delta object + name: The run step delta object beta: true example: | { - "id": "msg_123", - "object": "thread.message.delta", + "id": "step_123", + "object": "thread.run.step.delta", "delta": { - "content": [ - { - "index": 0, - "type": "text", - "text": { "value": "Hello", "annotations": [] } - } - ] - } - } - - CreateMessageRequest: - type: object - additionalProperties: false - required: - - role - - content - properties: - role: - type: string - enum: ["user", "assistant"] - description: | - The role of the entity that is creating the message. Allowed values include: - - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. - content: - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). - title: Array of content parts - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageRequestContentTextObject" - x-oaiExpandable: true - minItems: 1 - x-oaiExpandable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they should be added to. 
- required: - - file_id - - tools - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - - ModifyMessageRequest: - type: object - additionalProperties: false - properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - - DeleteMessageResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: [thread.message.deleted] - required: - - id - - object - - deleted + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } - ListMessagesResponse: + ListRunStepsResponse: properties: object: type: string @@ -11190,13 +14013,13 @@ components: data: type: array items: - $ref: "#/components/schemas/MessageObject" + $ref: "#/components/schemas/RunStepObject" first_id: type: string - example: "msg_abc123" + example: "step_abc123" last_id: type: string - example: "msg_abc123" + example: "step_abc456" has_more: type: boolean example: false @@ -11207,391 +14030,482 @@ components: - last_id - has_more - MessageContentImageFileObject: - title: Image file + RunStepDetailsMessageCreationObject: + title: Message creation type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Details of the message creation by the run step. properties: type: - description: Always `image_file`. + description: Always `message_creation`. type: string - enum: ["image_file"] - image_file: + enum: [ "message_creation" ] + message_creation: type: object properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: + message_id: type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] - default: "auto" + description: The ID of the message that was created by this run step. required: - - file_id + - message_id required: - type - - image_file + - message_creation - MessageDeltaContentImageFileObject: - title: Image file + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Details of the message creation by the run step. properties: - index: - type: integer - description: The index of the content part in the message. type: - description: Always `image_file`. + description: Always `message_creation`. type: string - enum: ["image_file"] - image_file: + enum: [ "message_creation" ] + message_creation: type: object properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: + message_id: type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] - default: "auto" + description: The ID of the message that was created by this run step. 
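As a minimal example, the `step_details` of a run step of type `message_creation` reduces to the following; the message ID is a placeholder.

```json
{
  "type": "message_creation",
  "message_creation": { "message_id": "msg_abc123" }
}
```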
required: - - index - type - MessageContentImageUrlObject: - title: Image URL + RunStepDetailsToolCallsObject: + title: Tool calls type: object - description: References an image URL in the content of a message. + description: Details of the tool call. properties: type: + description: Always `tool_calls`. type: string - enum: ["image_url"] - description: The type of the content part. - image_url: - type: object - properties: - url: - type: string - description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - format: uri - detail: - type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: ["auto", "low", "high"] - default: "auto" - required: - - url + enum: [ "tool_calls" ] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true required: - type - - image_url + - tool_calls - MessageDeltaContentImageUrlObject: - title: Image URL + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls type: object - description: References an image URL in the content of a message. + description: Details of the tool call. properties: - index: - type: integer - description: The index of the content part in the message. type: - description: Always `image_url`. + description: Always `tool_calls`. type: string - enum: ["image_url"] - image_url: + enum: [ "tool_calls" ] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: [ "code_interpreter" ] + code_interpreter: type: object + description: The Code Interpreter tool call definition. + required: + - input + - outputs properties: - url: - description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - type: string - detail: + input: type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] - default: "auto" + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. 
+ items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true required: - - index + - id - type + - code_interpreter - MessageContentTextObject: - title: Text + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call type: object - description: The text content that is part of a message. + description: Details of the Code Interpreter tool call the run step was involved in. properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. type: - description: Always `text`. type: string - enum: ["text"] - text: + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: [ "code_interpreter" ] + code_interpreter: type: object + description: The Code Interpreter tool call definition. properties: - value: - description: The data that makes up the text. + input: type: string - annotations: + description: The input to the Code Interpreter tool call. + outputs: type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. items: + type: object oneOf: - - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" x-oaiExpandable: true - required: - - value - - annotations required: + - index - type - - text - MessageRequestContentTextObject: - title: Text + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output type: object - description: The text content that is part of a message. + description: Text output from the Code Interpreter tool call as part of a run step. properties: type: - description: Always `text`. + description: Always `logs`. type: string - enum: ["text"] - text: + enum: [ "logs" ] + logs: type: string - description: Text content to be sent to the model + description: The text output from the Code Interpreter tool call. required: - type - - text + - logs - MessageContentTextAnnotationsFileCitationObject: - title: File citation + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + description: Text output from the Code Interpreter tool call as part of a run step. properties: + index: + type: integer + description: The index of the output in the outputs array. type: - description: Always `file_citation`. + description: Always `logs`. type: string - enum: ["file_citation"] - text: - description: The text in the message content that needs to be replaced. + enum: [ "logs" ] + logs: type: string - file_citation: + description: The text output from the Code Interpreter tool call. 
+ required: + - index + - type + + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: [ "image" ] + image: type: object properties: file_id: - description: The ID of the specific File the citation is from. - type: string - quote: - description: The specific quote in the file. + description: The [file](/docs/api-reference/files) ID of the image. type: string required: - file_id - - quote - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 required: - type - - text - - file_citation - - start_index - - end_index + - image - MessageContentTextAnnotationsFilePathObject: - title: File path + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. properties: + index: + type: integer + description: The index of the output in the outputs array. type: - description: Always `file_path`. - type: string - enum: ["file_path"] - text: - description: The text in the message content that needs to be replaced. + description: Always `image`. type: string - file_path: + enum: [ "image" ] + image: type: object properties: file_id: - description: The ID of the file that was generated. + description: The [file](/docs/api-reference/files) ID of the image. type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 required: + - index - type - - text - - file_path - - start_index - - end_index - MessageDeltaContentTextObject: - title: Text + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call type: object - description: The text content that is part of a message. properties: - index: - type: integer - description: The index of the content part in the message. + id: + type: string + description: The ID of the tool call object. type: - description: Always `text`. type: string - enum: ["text"] - text: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: [ "file_search" ] + file_search: type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map properties: - value: - description: The data that makes up the text. - type: string - annotations: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject" + results: type: array + description: The results of the file search. items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" - x-oaiExpandable: true + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" required: - - index + - id - type + - file_search - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + type: object + description: The ranking options for the file search. + properties: + ranker: + type: string + description: The ranker used for the file search. + enum: [ "default_2024_08_21" ] + score_threshold: + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. 
+ minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + type: object + description: A result instance of the file search. + x-oaiTypeLabel: map + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. The content is only included if requested via the include query parameter. + items: + type: object + properties: + type: + type: string + description: The type of the content. + enum: [ "text" ] + text: + type: string + description: The text content of the file. + required: + - file_id + - file_name + - score + + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: index: type: integer - description: The index of the annotation in the text content part. + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. type: - description: Always `file_citation`. type: string - enum: ["file_citation"] - text: - description: The text in the message content that needs to be replaced. + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: [ "file_search" ] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + - file_search + + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: type: string - file_citation: + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: [ "function" ] + function: type: object + description: The definition of the function that was called. properties: - file_id: - description: The ID of the specific File the citation is from. + name: type: string - quote: - description: The specific quote in the file. + description: The name of the function. + arguments: type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true + required: + - name + - arguments + - output required: - - index + - id - type + - function - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + RunStepDeltaStepDetailsToolCallsFunctionObject: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + title: Function tool call properties: index: type: integer - description: The index of the annotation in the text content part. - type: - description: Always `file_path`. 
+ description: The index of the tool call in the tool calls array. + id: type: string - enum: ["file_path"] - text: - description: The text in the message content that needs to be replaced. + description: The ID of the tool call object. + type: type: string - file_path: + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: [ "function" ] + function: type: object + description: The definition of the function that was called. properties: - file_id: - description: The ID of the file that was generated. + name: type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true required: - index - type - RunStepObject: + VectorStoreExpirationAfter: type: object - title: Run steps - description: | - Represents a step in execution of a run. + title: Vector store expiration policy + description: The expiration policy for a vector store. + properties: + anchor: + description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." + type: string + enum: [ "last_active_at" ] + days: + description: The number of days after the anchor time that the vector store will expire. + type: integer + minimum: 1 + maximum: 365 + required: + - anchor + - days + + VectorStoreObject: + type: object + title: Vector store + description: A vector store is a collection of processed files can be used by the `file_search` tool. properties: id: - description: The identifier of the run step, which can be referenced in API endpoints. + description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `thread.run.step`. + description: The object type, which is always `vector_store`. type: string - enum: ["thread.run.step"] + enum: [ "vector_store" ] created_at: - description: The Unix timestamp (in seconds) for when the run step was created. + description: The Unix timestamp (in seconds) for when the vector store was created. type: integer - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. - type: string - thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was run. - type: string - run_id: - description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. - type: string - type: - description: The type of run step, which can be either `message_creation` or `tool_calls`. - type: string - enum: ["message_creation", "tool_calls"] - status: - description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + name: + description: The name of the vector store. type: string - enum: ["in_progress", "cancelled", "failed", "completed", "expired"] - step_details: - type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" - x-oaiExpandable: true - last_error: + usage_bytes: + description: The total number of bytes used by the files in the vector store. 
+ type: integer + file_counts: type: object - description: The last error associated with this run step. Will be `null` if there are no errors. - nullable: true properties: - code: - type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - expired_at: - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run step was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run step failed. + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + type: string + enum: [ "expired", "in_progress", "completed" ] + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will expire. type: integer nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run step completed. + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last active. type: integer nullable: true metadata: @@ -11599,81 +14513,85 @@ components: type: object x-oaiTypeLabel: map nullable: true - usage: - $ref: "#/components/schemas/RunStepCompletionUsage" required: - id - object + - usage_bytes - created_at - - assistant_id - - thread_id - - run_id - - type - status - - step_details - - last_error - - expired_at - - cancelled_at - - failed_at - - completed_at + - last_active_at + - name + - file_counts - metadata - - usage x-oaiMeta: - name: The run step object + name: The vector store object beta: true - example: *run_step_object_example + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } - RunStepDeltaObject: + CreateVectorStoreRequest: type: object - title: Run step delta object - description: | - Represents a run step delta i.e. any changed fields on a run step during streaming. + additionalProperties: false properties: - id: - description: The identifier of the run step, which can be referenced in API endpoints. + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. 
+ type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. type: string - object: - description: The object type, which is always `thread.run.step.delta`. + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. type: string - enum: ["thread.run.step.delta"] - delta: - description: The delta containing the fields that have changed on the run step. + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true + metadata: + description: *metadata_description type: object - properties: - step_details: - type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" - x-oaiExpandable: true - required: - - id - - object - - delta - x-oaiMeta: - name: The run step delta object - beta: true - example: | - { - "id": "step_123", - "object": "thread.run.step.delta", - "delta": { - "step_details": { - "type": "tool_calls", - "tool_calls": [ - { - "index": 0, - "id": "call_123", - "type": "code_interpreter", - "code_interpreter": { "input": "", "outputs": [] } - } - ] - } - } - } + x-oaiTypeLabel: map + nullable: true - ListRunStepsResponse: + ListVectorStoresResponse: properties: object: type: string @@ -11681,13 +14599,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunStepObject" + $ref: "#/components/schemas/VectorStoreObject" first_id: type: string - example: "step_abc123" + example: "vs_abc123" last_id: type: string - example: "step_abc456" + example: "vs_abc456" has_more: type: boolean example: false @@ -11698,385 +14616,259 @@ components: - last_id - has_more - RunStepDetailsMessageCreationObject: - title: Message creation - type: object - description: Details of the message creation by the run step. - properties: - type: - description: Always `message_creation`. - type: string - enum: ["message_creation"] - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - required: - - message_id - required: - - type - - message_creation - - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation - type: object - description: Details of the message creation by the run step. - properties: - type: - description: Always `message_creation`. - type: string - enum: ["message_creation"] - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - required: - - type - - RunStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. 
- type: string - enum: ["tool_calls"] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - items: - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - tool_calls - - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. - type: string - enum: ["tool_calls"] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. - items: - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call + DeleteVectorStoreResponse: type: object - description: Details of the Code Interpreter tool call the run step was involved in. properties: id: type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] - code_interpreter: - type: object - description: The Code Interpreter tool call definition. - required: - - input - - outputs - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true + deleted: + type: boolean + object: + type: string + enum: [ vector_store.deleted ] required: - id - - type - - code_interpreter + - object + - deleted - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call + VectorStoreFileObject: type: object - description: Details of the Code Interpreter tool call the run step was involved in. + title: Vector store files + description: A list of files attached to a vector store. properties: - index: - type: integer - description: The index of the tool call in the tool calls array. id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The ID of the tool call. - type: + object: + description: The object type, which is always `vector_store.file`. type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] - code_interpreter: + enum: [ "vector_store.file" ] + usage_bytes: + description: The total vector store usage in bytes. Note that this may be different from the original file size. 
+ type: integer + created_at: + description: The Unix timestamp (in seconds) for when the vector store file was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + type: string + enum: [ "in_progress", "completed", "cancelled", "failed" ] + last_error: type: object - description: The Code Interpreter tool call definition. + description: The last error associated with this vector store file. Will be `null` if there are no errors. + nullable: true properties: - input: + code: type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true + description: One of `server_error` or `rate_limit_exceeded`. + enum: + [ + "server_error", + "unsupported_file", + "invalid_file", + ] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + chunking_strategy: + type: object + description: The strategy used to chunk the file. + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true required: - - index - - type + - id + - object + - usage_bytes + - created_at + - vector_store_id + - status + - last_error + x-oaiMeta: + name: The vector store file object + beta: true + example: | + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } + } - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output + OtherChunkingStrategyResponseParam: type: object - description: Text output from the Code Interpreter tool call as part of a run step. + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false properties: type: - description: Always `logs`. - type: string - enum: ["logs"] - logs: type: string - description: The text output from the Code Interpreter tool call. + description: Always `other`. + enum: [ "other" ] required: - type - - logs - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output + StaticChunkingStrategyResponseParam: type: object - description: Text output from the Code Interpreter tool call as part of a run step. + title: Static Chunking Strategy + additionalProperties: false properties: - index: - type: integer - description: The index of the output in the outputs array. 
type: - description: Always `logs`. - type: string - enum: ["logs"] - logs: type: string - description: The text output from the Code Interpreter tool call. + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" required: - - index - type + - static - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output + StaticChunkingStrategy: type: object + additionalProperties: false properties: - type: - description: Always `image`. - type: string - enum: ["image"] - image: - type: object - properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. - type: string - required: - - file_id + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. required: - - type - - image + - max_chunk_size_tokens + - chunk_overlap_tokens - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output + AutoChunkingStrategyRequestParam: type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false properties: - index: - type: integer - description: The index of the output in the outputs array. type: - description: Always `image`. type: string - enum: ["image"] - image: - type: object - properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. - type: string + description: Always `auto`. + enum: [ "auto" ] required: - - index - type - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call + StaticChunkingStrategyRequestParam: type: object + title: Static Chunking Strategy + additionalProperties: false properties: - id: - type: string - description: The ID of the tool call object. type: type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map + description: Always `static`. + enum: [ "static" ] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" required: - - id - type - - file_search + - static - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call + ChunkingStrategyRequestParam: type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - required: - - index - - type - - file_search + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. 
+ oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true - RunStepDetailsToolCallsFunctionObject: + CreateVectorStoreFileRequest: type: object - title: Function tool call + additionalProperties: false properties: - id: - type: string - description: The ID of the tool call object. - type: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] - function: - type: object - description: The definition of the function that was called. - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true - required: - - name - - arguments - - output + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" required: - - id - - type - - function + - file_id - RunStepDeltaStepDetailsToolCallsFunctionObject: - type: object - title: Function tool call + ListVectorStoreFilesResponse: properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + object: type: string - description: The ID of the tool call object. - type: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreFileObject" + first_id: type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] - function: - type: object - description: The definition of the function that was called. - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + example: "file-abc123" + last_id: + type: string + example: "file-abc456" + has_more: + type: boolean + example: false required: - - index - - type + - object + - data + - first_id + - last_id + - has_more - VectorStoreExpirationAfter: + DeleteVectorStoreFileResponse: type: object - title: Vector store expiration policy - description: The expiration policy for a vector store. properties: - anchor: - description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." + id: type: string - enum: ["last_active_at"] - days: - description: The number of days after the anchor time that the vector store will expire. - type: integer - minimum: 1 - maximum: 365 + deleted: + type: boolean + object: + type: string + enum: [ vector_store.file.deleted ] required: - - anchor - - days + - id + - object + - deleted - VectorStoreObject: + VectorStoreFileBatchObject: type: object - title: Vector store - description: A vector store is a collection of processed files can be used by the `file_search` tool. + title: Vector store file batch + description: A batch of files attached to a vector store. properties: id: description: The identifier, which can be referenced in API endpoints. 
type: string object: - description: The object type, which is always `vector_store`. + description: The object type, which is always `vector_store.file_batch`. type: string - enum: ["vector_store"] + enum: [ "vector_store.files_batch" ] created_at: - description: The Unix timestamp (in seconds) for when the vector store was created. + description: The Unix timestamp (in seconds) for when the vector store files batch was created. type: integer - name: - description: The name of the vector store. + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. type: string - usage_bytes: - description: The total number of bytes used by the files in the vector store. - type: integer + status: + description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + type: string + enum: [ "in_progress", "completed", "cancelled", "failed" ] file_counts: type: object properties: @@ -12084,13 +14876,13 @@ components: description: The number of files that are currently being processed. type: integer completed: - description: The number of files that have been successfully processed. + description: The number of files that have been processed. type: integer failed: description: The number of files that have failed to process. type: integer cancelled: - description: The number of files that were cancelled. + description: The number of files that where cancelled. type: integer total: description: The total number of files. @@ -12098,910 +14890,1678 @@ components: required: - in_progress - completed - - failed - cancelled + - failed - total - status: - description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. - type: string - enum: ["expired", "in_progress", "completed"] - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - expires_at: - description: The Unix timestamp (in seconds) for when the vector store will expire. - type: integer - nullable: true - last_active_at: - description: The Unix timestamp (in seconds) for when the vector store was last active. - type: integer - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true required: - id - object - - usage_bytes - created_at + - vector_store_id - status - - last_active_at - - name - file_counts - - metadata x-oaiMeta: - name: The vector store object + name: The vector store files batch object beta: true example: | { - "id": "vs_123", - "object": "vector_store", + "id": "vsfb_123", + "object": "vector_store.files_batch", "created_at": 1698107661, - "usage_bytes": 123456, - "last_active_at": 1698107661, - "name": "my_vector_store", + "vector_store_id": "vs_abc123", "status": "completed", "file_counts": { "in_progress": 0, "completed": 100, - "cancelled": 0, "failed": 0, + "cancelled": 0, "total": 100 - }, - "metadata": {}, - "last_used_at": 1698107661 + } } - CreateVectorStoreRequest: + CreateVectorStoreFileBatchRequest: type: object additionalProperties: false properties: file_ids: description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. type: array + minItems: 1 maxItems: 500 items: type: string - name: - description: The name of the vector store. 
- type: string - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_ids - UpdateVectorStoreRequest: - type: object - additionalProperties: false - properties: - name: - description: The name of the vector store. - type: string - nullable: true - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + AssistantStreamEvent: + description: | + Represents an event emitted when streaming a Run. + + Each event in a server-sent events stream has an `event` and `data` property: + + ``` + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + ``` + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit `thread.run.created` when a new run + is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses + to create a message during a run, we emit a `thread.message.created event`, a + `thread.message.in_progress` event, many `thread.message.delta` events, and finally a + `thread.message.completed` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to + integrate the Assistants API with streaming. + oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true - ListVectorStoresResponse: - properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/VectorStoreObject" - first_id: - type: string - example: "vs_abc123" - last_id: - type: string - example: "vs_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more + ThreadStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.created" ] + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" - DeleteVectorStoreResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: [vector_store.deleted] - required: - - id - - object - - deleted + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.run.created" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.queued" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.in_progress" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.requires_action" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.completed" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.failed" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.cancelling" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.cancelled" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.expired" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - VectorStoreFileObject: - type: object - title: Vector store files - description: A list of files attached to a vector store. - properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `vector_store.file`. 
- type: string - enum: ["vector_store.file"] - usage_bytes: - description: The total vector store usage in bytes. Note that this may be different from the original file size. - type: integer - created_at: - description: The Unix timestamp (in seconds) for when the vector store file was created. - type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. - type: string - enum: ["in_progress", "completed", "cancelled", "failed"] - last_error: - type: object - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true + RunStepStreamEvent: + oneOf: + - type: object properties: - code: + event: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: - [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", - ] - message: + enum: [ "thread.run.step.created" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: type: string - description: A human-readable description of the error. + enum: [ "thread.run.step.in_progress" ] + data: + $ref: "#/components/schemas/RunStepObject" required: - - code - - message - required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error - x-oaiMeta: - name: The vector store file object - beta: true - example: | - { - "id": "file-abc123", - "object": "vector_store.file", - "usage_bytes": 1234, - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "last_error": null - } + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.delta" ] + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.completed" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.failed" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.cancelled" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.step.expired" ] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" - CreateVectorStoreFileRequest: - type: object - additionalProperties: false - properties: - file_id: - description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. - type: string - required: - - file_id + MessageStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: [ "thread.message.created" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.in_progress" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.delta" ] + data: + $ref: "#/components/schemas/MessageDeltaObject" + required: + - event + - data + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.completed" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.message.incomplete" ] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - ListVectorStoreFilesResponse: + ErrorEvent: + type: object properties: - object: + event: type: string - example: "list" + enum: [ "error" ] data: - type: array - items: - $ref: "#/components/schemas/VectorStoreFileObject" - first_id: - type: string - example: "file-abc123" - last_id: - type: string - example: "file-abc456" - has_more: - type: boolean - example: false + $ref: "#/components/schemas/Error" required: - - object + - event - data - - first_id - - last_id - - has_more + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" - DeleteVectorStoreFileResponse: + DoneEvent: type: object properties: - id: + event: type: string - deleted: - type: boolean - object: + enum: [ "done" ] + data: type: string - enum: [vector_store.file.deleted] + enum: [ "[DONE]" ] required: - - id - - object - - deleted + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" - VectorStoreFileBatchObject: + Batch: type: object - title: Vector store file batch - description: A batch of files attached to a vector store. properties: id: - description: The identifier, which can be referenced in API endpoints. type: string object: - description: The object type, which is always `vector_store.file_batch`. type: string - enum: ["vector_store.files_batch"] - created_at: - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + enum: [ batch ] + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. + nullable: true + line: + type: integer + description: The line number of the input file where the error occurred, if applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: type: string + description: The time frame within which the batch should be processed. status: - description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] - file_counts: + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. 
+ created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: type: object properties: - in_progress: - description: The number of files that are currently being processed. + total: type: integer + description: Total number of requests in the batch. completed: - description: The number of files that have been processed. type: integer + description: Number of requests that have been completed successfully. failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that where cancelled. - type: integer - total: - description: The total number of files. type: integer + description: Number of requests that have failed. required: - - in_progress + - total - completed - - cancelled - failed - - total + description: The request counts for different statuses within the batch. + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - id - object - - created_at - - vector_store_id + - endpoint + - input_file_id + - completion_window - status - - file_counts + - created_at x-oaiMeta: - name: The vector store files batch object - beta: true + name: The batch object + example: *batch_object + + BatchRequestInput: + type: object + description: The per-line object of the batch input file + properties: + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + type: string + enum: [ "POST" ] + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. 
+ x-oaiMeta: + name: The request input object example: | - { - "id": "vsfb_123", - "object": "vector_store.files_batch", - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "failed": 0, - "cancelled": 0, - "total": 100 - } - } + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} - CreateVectorStoreFileBatchRequest: + BatchRequestOutput: type: object - additionalProperties: false + description: The per-line object of the batch output and error files properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: + type: object + nullable: true + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. + body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: + type: object + nullable: true + description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + x-oaiMeta: + name: The request output object + example: | + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + + ListBatchesResponse: + type: object + properties: + data: type: array - minItems: 1 - maxItems: 500 items: - type: string + $ref: "#/components/schemas/Batch" + first_id: + type: string + example: "batch_abc123" + last_id: + type: string + example: "batch_abc456" + has_more: + type: boolean + object: + type: string + enum: [ list ] required: - - file_ids + - object + - data + - has_more - AssistantStreamEvent: - description: | - Represents an event emitted when streaming a Run. + AuditLogActorServiceAccount: + type: object + description: The service account that performed the audit logged action. + properties: + id: + type: string + description: The service account id. - Each event in a server-sent events stream has an `event` and `data` property: + AuditLogActorUser: + type: object + description: The user who performed the audit logged action. + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. - ``` - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - ``` + AuditLogActorApiKey: + type: object + description: The API Key used to perform the audit logged action. + properties: + id: + type: string + description: The tracking id of the API key. 
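The `BatchRequestInput` and `BatchRequestOutput` objects above describe the individual lines of the JSONL files that the Batch API consumes and produces. As a rough, non-normative sketch (the file name and the request payload are made up), such an input file could be assembled in plain Dart with nothing beyond `dart:convert` and `dart:io`:

```dart
import 'dart:convert';
import 'dart:io';

void main() {
  // One map per request; each becomes a single line of the JSONL input file,
  // following the BatchRequestInput shape shown above.
  final requests = [
    {
      'custom_id': 'request-1', // must be unique within the batch
      'method': 'POST',
      'url': '/v1/chat/completions',
      'body': {
        'model': 'gpt-4o-mini',
        'messages': [
          {'role': 'system', 'content': 'You are a helpful assistant.'},
          {'role': 'user', 'content': 'What is 2+2?'},
        ],
      },
    },
  ];

  // JSON Lines: one JSON object per line, no surrounding array.
  final jsonl = requests.map(jsonEncode).join('\n');
  File('batch_input.jsonl').writeAsStringSync('$jsonl\n');
}
```

The corresponding output file can be read back line by line and matched to each request through `custom_id`, as described by `BatchRequestOutput`.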
+ type: + type: string + description: The type of API key. Can be either `user` or `service_account`. + enum: [ "user", "service_account" ] + user: + $ref: "#/components/schemas/AuditLogActorUser" + service_account: + $ref: "#/components/schemas/AuditLogActorServiceAccount" - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit `thread.run.created` when a new run - is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses - to create a message during a run, we emit a `thread.message.created event`, a - `thread.message.in_progress` event, many `thread.message.delta` events, and finally a - `thread.message.completed` event. + AuditLogActorSession: + type: object + description: The session in which the audit logged action was performed. + properties: + user: + $ref: "#/components/schemas/AuditLogActorUser" + ip_address: + type: string + description: The IP address from which the action was performed. - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to - integrate the Assistants API with streaming. - oneOf: - - $ref: "#/components/schemas/ThreadStreamEvent" - - $ref: "#/components/schemas/RunStreamEvent" - - $ref: "#/components/schemas/RunStepStreamEvent" - - $ref: "#/components/schemas/MessageStreamEvent" - - $ref: "#/components/schemas/ErrorEvent" - - $ref: "#/components/schemas/DoneEvent" - x-oaiMeta: - name: Assistant stream events - beta: true + AuditLogActor: + type: object + description: The actor who performed the audit logged action. + properties: + type: + type: string + description: The type of actor. Is either `session` or `api_key`. + enum: [ "session", "api_key" ] + session: + type: object + $ref: "#/components/schemas/AuditLogActorSession" + api_key: + type: object + $ref: "#/components/schemas/AuditLogActorApiKey" - ThreadStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: ["thread.created"] - data: - $ref: "#/components/schemas/ThreadObject" - required: - - event - - data - description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" - RunStreamEvent: - oneOf: - - type: object + AuditLogEventType: + type: string + description: The event type. + x-oaiExpandable: true + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - user.added + - user.updated + - user.deleted + + AuditLog: + type: object + description: A log of a user action or configuration change within this organization. + properties: + id: + type: string + description: The ID of this log. + type: + $ref: "#/components/schemas/AuditLogEventType" + + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + description: The project that the action was scoped to. Absent for actions not scoped to projects. 
properties: - event: + id: type: string - enum: ["thread.run.created"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a new [run](/docs/api-reference/runs/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: + description: The project ID. + name: type: string - enum: ["thread.run.queued"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + description: The project title. + actor: + $ref: "#/components/schemas/AuditLogActor" + api_key.created: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.in_progress"] + description: The tracking ID of the API key. data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + type: object + description: The payload used to create the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.updated: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.requires_action"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + description: The tracking ID of the API key. + changes_requested: + type: object + description: The payload used to update the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.deleted: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.completed"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + description: The tracking ID of the API key. + invite.sent: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.failed"] + description: The ID of the invite. data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + type: object + description: The payload used to create the invite. + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + invite.accepted: + type: object + description: The details for events with this `type`. 
properties: - event: + id: type: string - enum: ["thread.run.cancelling"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + description: The ID of the invite. + invite.deleted: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.cancelled"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object + description: The ID of the invite. + login.failed: + type: object + description: The details for events with this `type`. properties: - event: + error_code: type: string - enum: ["thread.run.expired"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - RunStepStreamEvent: - oneOf: - - type: object - properties: - event: + description: The error code of the failure. + error_message: type: string - enum: ["thread.run.step.created"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object + description: The error message of the failure. + logout.failed: + type: object + description: The details for events with this `type`. properties: - event: + error_code: type: string - enum: ["thread.run.step.in_progress"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object - properties: - event: + description: The error code of the failure. + error_message: type: string - enum: ["thread.run.step.delta"] - data: - $ref: "#/components/schemas/RunStepDeltaObject" - required: - - event - - data - description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - - type: object + description: The error message of the failure. + organization.updated: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.step.completed"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object + description: The organization ID. + changes_requested: + type: object + description: The payload used to update the organization settings. + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. 
+ name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + project.created: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.step.failed"] + description: The project ID. data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object + type: object + description: The payload used to create the project. + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + project.updated: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.step.cancelled"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object + description: The project ID. + changes_requested: + type: object + description: The payload used to update the project. + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + project.archived: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.run.step.expired"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - MessageStreamEvent: - oneOf: - - type: object + description: The project ID. + service_account.created: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.message.created"] + description: The service account ID. data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object + type: object + description: The payload used to create the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.updated: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.message.in_progress"] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. 
- x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object + description: The service account ID. + changes_requested: + type: object + description: The payload used to updated the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.deleted: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.message.delta"] - data: - $ref: "#/components/schemas/MessageDeltaObject" - required: - - event - - data - description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" - - type: object + description: The service account ID. + user.added: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.message.completed"] + description: The user ID. data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object + type: object + description: The payload used to add the user to the project. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.updated: + type: object + description: The details for events with this `type`. properties: - event: + id: type: string - enum: ["thread.message.incomplete"] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + description: The project ID. + changes_requested: + type: object + description: The payload used to update the user. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. + required: + - id + - type + - effective_at + - actor + x-oaiMeta: + name: The audit log object + example: | + { + "id": "req_xxx_20240101", + "type": "api_key.created", + "effective_at": 1720804090, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.created": { + "id": "key_xxxx", + "data": { + "scopes": ["resource.operation"] + } + } + } + + ListAuditLogsResponse: + type: object + properties: + object: + type: string + enum: [ list ] + data: + type: array + items: + $ref: "#/components/schemas/AuditLog" + first_id: + type: string + example: "audit_log-defb456h8dks" + last_id: + type: string + example: "audit_log-hnbkd8s93s" + has_more: + type: boolean + + required: + - object + - data + - first_id + - last_id + - has_more + + Invite: + type: object + description: Represents an individual `invite` to the organization. 
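Since the `AuditLog` object stores its event-specific payload under a key named after the event type (for example `api_key.created` in the example above), decoding it is mostly a matter of looking that key up. A minimal sketch using only `dart:convert`, with the spec's own example trimmed down:

```dart
import 'dart:convert';

void main() {
  // Trimmed copy of the audit log example shown above.
  const raw = '''
  {
    "id": "req_xxx_20240101",
    "type": "api_key.created",
    "effective_at": 1720804090,
    "actor": {
      "type": "session",
      "session": {"user": {"id": "user-xxx", "email": "user@example.com"}}
    },
    "api_key.created": {"id": "key_xxxx", "data": {"scopes": ["resource.operation"]}}
  }''';

  final log = jsonDecode(raw) as Map<String, dynamic>;
  final eventType = log['type'] as String;

  // The event-specific details live under a key equal to the event type.
  final details = log[eventType] as Map<String, dynamic>?;
  print('$eventType at ${log['effective_at']}: ${details ?? '(no details)'}');
}
```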
+ properties: + object: + type: string + enum: [ organization.invite ] + description: The object type, which is always `organization.invite` + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: [ owner, reader ] + description: "`owner` or `reader`" + status: + type: string + enum: [ accepted, expired, pending ] + description: "`accepted`,`expired`, or `pending`" + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. + + required: + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + + InviteListResponse: + type: object + properties: + object: + type: string + enum: [ list ] + description: The object type, which is always `list` + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. + required: + - object + - data + + InviteRequest: + type: object + properties: + email: + type: string + description: "Send an email to this address" + role: + type: string + enum: [ reader, owner ] + description: "`owner` or `reader`" + required: + - email + - role + + InviteDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.invite.deleted ] + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + + User: + type: object + description: Represents an individual `user` within an organization. + properties: + object: + type: string + enum: [ organization.user ] + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [ owner, reader ] + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. 
+ required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The user object + example: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + UserListResponse: + type: object + properties: + object: + type: string + enum: [ list ] + data: + type: array + items: + $ref: '#/components/schemas/User' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + + UserRoleUpdateRequest: + type: object + properties: + role: + type: string + enum: [ owner,reader ] + description: "`owner` or `reader`" + required: + - role + + UserDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.user.deleted ] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + + Project: + type: object + description: Represents an individual project. + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: [ organization.project ] + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or `null`. + status: + type: string + enum: [ active, archived ] + description: "`active` or `archived`" + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - id + - object + - name + - created_at + - status + x-oaiMeta: + name: The project object + example: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active", + "app_use_case": "Your project use case here", + "business_website": "https://example.com" + } - ErrorEvent: + ProjectListResponse: type: object properties: - event: + object: type: string - enum: ["error"] + enum: [ list ] data: - $ref: "#/components/schemas/Error" + type: array + items: + $ref: '#/components/schemas/Project' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - event + - object - data - description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + - first_id + - last_id + - has_more + + ProjectCreateRequest: + type: object + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). 
+ business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - name + + ProjectUpdateRequest: + type: object + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. + app_use_case: + type: string + description: A description of your business, project, or use case. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + business_website: + type: string + description: Your business URL, or if you don't have one yet, a URL to your LinkedIn or other social media. [Why we need this information](https://help.openai.com/en/articles/9824607-api-platform-verifications). + required: + - name + + DefaultProjectErrorResponse: + type: object + properties: + code: + type: integer + message: + type: string + required: + - code + - message + + ProjectUser: + type: object + description: Represents an individual user in a project. + properties: + object: + type: string + enum: [ organization.project.user ] + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. + + required: + - object + - id + - name + - email + - role + - added_at x-oaiMeta: - dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + name: The project user object + example: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - DoneEvent: + ProjectUserListResponse: type: object properties: - event: + object: type: string - enum: ["done"] data: + type: array + items: + $ref: '#/components/schemas/ProjectUser' + first_id: + type: string + last_id: type: string - enum: ["[DONE]"] + has_more: + type: boolean required: - - event + - object - data - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: "`data` is `[DONE]`" + - first_id + - last_id + - has_more - Batch: + ProjectUserCreateRequest: + type: object + properties: + user_id: + type: string + description: The ID of the user. + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + required: + - user_id + - role + + ProjectUserUpdateRequest: + type: object + properties: + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + required: + - role + + ProjectUserDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.project.user.deleted ] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + + ProjectServiceAccount: type: object + description: Represents an individual service account in a project. 
properties: + object: + type: string + enum: [ organization.project.service_account ] + description: The object type, which is always `organization.project.service_account` id: type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the service account + role: + type: string + enum: [ owner, member ] + description: "`owner` or `member`" + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the service account was created + required: + - object + - id + - name + - role + - created_at + x-oaiMeta: + name: The project service account object + example: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + ProjectServiceAccountListResponse: + type: object + properties: object: type: string - enum: [batch] - description: The object type, which is always `batch`. - endpoint: + enum: [ list ] + data: + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: type: string - description: The OpenAI API endpoint used by the batch. + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more - errors: - type: object - properties: - object: - type: string - description: The object type, which is always `list`. - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: The name of the parameter that caused the error, if applicable. - nullable: true - line: - type: integer - description: The line number of the input file where the error occurred, if applicable. - nullable: true - input_file_id: + ProjectServiceAccountCreateRequest: + type: object + properties: + name: type: string - description: The ID of the input file for the batch. - completion_window: + description: The name of the service account being created. + required: + - name + + ProjectServiceAccountCreateResponse: + type: object + properties: + object: type: string - description: The time frame within which the batch should be processed. - status: + enum: [ organization.project.service_account ] + id: type: string - description: The current status of the batch. - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - output_file_id: + name: type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: + role: type: string - description: The ID of the file containing the outputs of requests with errors. + enum: [ member ] + description: Service accounts can only have one role of type `member` created_at: type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. 
- failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - required: - - total - - completed - - failed - description: The request counts for different statuses within the batch. - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' required: - - id - object - - endpoint - - input_file_id - - completion_window - - status + - id + - name + - role - created_at - x-oaiMeta: - name: The batch object - example: *batch_object + - api_key - BatchRequestInput: + ProjectServiceAccountApiKey: type: object - description: The per-line object of the batch input file properties: - custom_id: + object: type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + enum: [ organization.project.service_account.api_key ] + description: The object type, which is always `organization.project.service_account.api_key` + + value: type: string - enum: ["POST"] - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: + name: type: string - description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - x-oaiMeta: - name: The request input object - example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + created_at: + type: integer + id: + type: string + required: + - object + - value + - name + - created_at + - id - BatchRequestOutput: + ProjectServiceAccountDeleteResponse: type: object - description: The per-line object of the batch output and error files properties: + object: + type: string + enum: [ organization.project.service_account.deleted ] id: type: string - custom_id: + deleted: + type: boolean + required: + - object + - id + - deleted + + ProjectApiKey: + type: object + description: Represents an individual API key in a project. + properties: + object: type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - nullable: true - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. 
- body: - type: object - x-oaiTypeLabel: map - description: The JSON body of the response - error: + enum: [ organization.project.api_key ] + description: The object type, which is always `organization.project.api_key` + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: type: object - nullable: true - description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: + type: type: string - description: A human-readable error message. + enum: [ user, service_account ] + description: "`user` or `service_account`" + user: + $ref: '#/components/schemas/ProjectUser' + service_account: + $ref: '#/components/schemas/ProjectServiceAccount' + required: + - object + - redacted_value + - name + - created_at + - id + - owner x-oaiMeta: - name: The request output object + name: The project API key object example: | - {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-3.5-turbo", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "created_at": 1711471533 + } + } + } - ListBatchesResponse: + ProjectApiKeyListResponse: type: object properties: + object: + type: string + enum: [ list ] data: type: array items: - $ref: "#/components/schemas/Batch" + $ref: '#/components/schemas/ProjectApiKey' first_id: type: string - example: "batch_abc123" last_id: type: string - example: "batch_abc456" has_more: type: boolean - object: - type: string - enum: [list] required: - object - data + - first_id + - last_id - has_more + ProjectApiKeyDeleteResponse: + type: object + properties: + object: + type: string + enum: [ organization.project.api_key.deleted ] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + security: - - ApiKeyAuth: [] + - ApiKeyAuth: [ ] x-oaiMeta: navigationGroups: @@ -13009,6 +16569,8 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants + - id: administration + title: Administration - id: legacy title: Legacy groups: @@ -13037,7 +16599,7 @@ x-oaiMeta: title: Audio description: | Learn how to turn audio into text or text into audio. - + Related guide: [Speech to text](/docs/guides/speech-to-text) navigationGroup: endpoints sections: @@ -13060,7 +16622,7 @@ x-oaiMeta: title: Chat description: | Given a list of messages comprising a conversation, the model will return a response. 
- + Related guide: [Chat Completions](/docs/guides/text-generation) navigationGroup: endpoints sections: @@ -13077,7 +16639,7 @@ x-oaiMeta: title: Embeddings description: | Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - + Related guide: [Embeddings](/docs/guides/embeddings) navigationGroup: endpoints sections: @@ -13091,7 +16653,7 @@ x-oaiMeta: title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. - + Related guide: [Fine-tune models](/docs/guides/fine-tuning) navigationGroup: endpoints sections: @@ -13113,6 +16675,12 @@ x-oaiMeta: - type: endpoint key: cancelFineTuningJob path: cancel + - type: object + key: FinetuneChatRequestInput + path: chat-input + - type: object + key: FinetuneCompletionRequestInput + path: completions-input - type: object key: FineTuningJob path: object @@ -13126,7 +16694,7 @@ x-oaiMeta: title: Batch description: | Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount. - + Related guide: [Batch](/docs/guides/batch) navigationGroup: endpoints sections: @@ -13147,10 +16715,10 @@ x-oaiMeta: path: object - type: object key: BatchRequestInput - path: requestInput + path: request-input - type: object key: BatchRequestOutput - path: requestOutput + path: request-output - id: files title: Files description: | @@ -13175,11 +16743,35 @@ x-oaiMeta: - type: object key: OpenAIFile path: object + - id: uploads + title: Uploads + description: | + Allows you to upload large files in multiple parts. + navigationGroup: endpoints + sections: + - type: endpoint + key: createUpload + path: create + - type: endpoint + key: addUploadPart + path: add-part + - type: endpoint + key: completeUpload + path: complete + - type: endpoint + key: cancelUpload + path: cancel + - type: object + key: Upload + path: object + - type: object + key: UploadPart + path: part-object - id: images title: Images description: | Given a prompt and/or an input image, the model will generate a new image. - + Related guide: [Image generation](/docs/guides/images) navigationGroup: endpoints sections: @@ -13217,7 +16809,7 @@ x-oaiMeta: title: Moderations description: | Given some input text, outputs if the model classifies it as potentially harmful across several categories. - + Related guide: [Moderations](/docs/guides/moderation) navigationGroup: endpoints sections: @@ -13227,12 +16819,14 @@ x-oaiMeta: - type: object key: CreateModerationResponse path: object + + - id: assistants title: Assistants beta: true description: | Build assistants that can call models and use tools to perform tasks. - + [Get started with the Assistants API](/docs/assistants) navigationGroup: assistants sections: @@ -13259,7 +16853,7 @@ x-oaiMeta: beta: true description: | Create threads that assistants can interact with. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13283,7 +16877,7 @@ x-oaiMeta: beta: true description: | Create messages within threads - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13310,7 +16904,7 @@ x-oaiMeta: beta: true description: | Represents an execution run on a thread. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13343,7 +16937,7 @@ x-oaiMeta: beta: true description: | Represents the steps (model and tool calls) taken during the run. 
-      
+
       Related guide: [Assistants](/docs/assistants/overview)
     navigationGroup: assistants
     sections:
@@ -13361,7 +16955,7 @@ x-oaiMeta:
     beta: true
     description: |
       Vector stores are used to store files for use by the `file_search` tool.
-      
+
       Related guide: [File Search](/docs/assistants/tools/file-search)
     navigationGroup: assistants
     sections:
@@ -13388,7 +16982,7 @@ x-oaiMeta:
     beta: true
     description: |
       Vector store files represent files inside a vector store.
-      
+
       Related guide: [File Search](/docs/assistants/tools/file-search)
     navigationGroup: assistants
     sections:
@@ -13412,7 +17006,7 @@ x-oaiMeta:
     beta: true
     description: |
       Vector store file batches represent operations to add multiple files to a vector store.
-      
+
       Related guide: [File Search](/docs/assistants/tools/file-search)
     navigationGroup: assistants
     sections:
@@ -13436,11 +17030,11 @@ x-oaiMeta:
     beta: true
     description: |
       Stream the result of executing a Run or resuming a Run after submitting tool outputs.
-      
+
       You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun), [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs) endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.
-      
+
       Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the [Assistants API quickstart](/docs/assistants/overview) to learn more.
     navigationGroup: assistants
@@ -13454,6 +17048,175 @@ x-oaiMeta:
     - type: object
       key: AssistantStreamEvent
       path: events
+
+  - id: administration
+    title: Overview
+    description: |
+      Programmatically manage your organization.
+
+      The Audit Logs endpoint provides a log of all actions taken in the
+      organization for security and monitoring purposes.
+
+      To access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.
+
+      For best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)
+    navigationGroup: administration
+
+  - id: invite
+    title: Invites
+    description: Invite and manage invitations for an organization. Invited users are automatically added to the Default project.
+    navigationGroup: administration
+    sections:
+      - type: endpoint
+        key: list-invites
+        path: list
+      - type: endpoint
+        key: inviteUser
+        path: create
+      - type: endpoint
+        key: retrieve-invite
+        path: retrieve
+      - type: endpoint
+        key: delete-invite
+        path: delete
+      - type: object
+        key: Invite
+        path: object
+
+  - id: users
+    title: Users
+    description: |
+      Manage users and their role in an organization. Users will be automatically added to the Default project.
+    navigationGroup: administration
+    sections:
+      - type: endpoint
+        key: list-users
+        path: list
+      - type: endpoint
+        key: modify-user
+        path: modify
+      - type: endpoint
+        key: retrieve-user
+        path: retrieve
+      - type: endpoint
+        key: delete-user
+        path: delete
+      - type: object
+        key: User
+        path: object
+
+  - id: projects
+    title: Projects
+    description: |
+      Manage the projects within an organization, including creating, updating, and archiving projects.
+      The Default project cannot be modified or archived.
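The administration groups introduced above (invites, users, projects, service accounts, API keys, audit logs) all sit behind organization-scoped endpoints that require an Admin API key rather than a regular project key. As a rough illustration only, and assuming the conventional `https://api.openai.com/v1/organization/...` path for the `list-invites` operation referenced above (the concrete URLs are not shown in this part of the diff), listing invites could look like this with plain `package:http`:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:http/http.dart' as http;

Future<void> main() async {
  // Assumption: an Admin API key generated from the organization settings,
  // provided to the script via an environment variable.
  final adminKey = Platform.environment['OPENAI_ADMIN_KEY'];

  // Hypothetical URL for the `list-invites` endpoint referenced above.
  final response = await http.get(
    Uri.parse('https://api.openai.com/v1/organization/invites'),
    headers: {'Authorization': 'Bearer $adminKey'},
  );

  // Expected to follow the InviteListResponse schema: an `object: list`
  // wrapper with a `data` array of Invite objects.
  final body = jsonDecode(response.body) as Map<String, dynamic>;
  for (final invite in (body['data'] as List<dynamic>? ?? [])) {
    final map = invite as Map<String, dynamic>;
    print('${map['email']} -> ${map['status']}');
  }
}
```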
+ navigationGroup: administration + sections: + - type: endpoint + key: list-projects + path: list + - type: endpoint + key: create-project + path: create + - type: endpoint + key: retrieve-project + path: retrieve + - type: endpoint + key: modify-project + path: modify + - type: endpoint + key: archive-project + path: archive + - type: object + key: Project + path: object + + - id: project-users + title: Project Users + description: | + Manage users within a project, including adding, updating roles, and removing users. + Users cannot be removed from the Default project, unless they are being removed from the organization. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-users + path: list + - type: endpoint + key: create-project-user + path: creeate + - type: endpoint + key: retrieve-project-user + path: retrieve + - type: endpoint + key: modify-project-user + path: modify + - type: endpoint + key: delete-project-user + path: delete + - type: object + key: ProjectUser + path: object + + - id: project-service-accounts + title: Project Service Accounts + description: | + Manage service accounts within a project. A service account is a bot user that is not associated with a user. + If a user leaves an organization, their keys and membership in projects will no longer work. Service accounts + do not have this limitation. However, service accounts can also be deleted from a project. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-service-accounts + path: list + - type: endpoint + key: create-project-service-account + path: create + - type: endpoint + key: retrieve-project-service-account + path: retrieve + - type: endpoint + key: delete-project-service-account + path: delete + - type: object + key: ProjectServiceAccount + path: object + + - id: project-api-keys + title: Project API Keys + description: | + Manage API keys for a given project. Supports listing and deleting keys for users. + This API does not allow issuing keys for users, as users need to authorize themselves to generate keys. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-api-keys + path: list + - type: endpoint + key: retrieve-project-api-key + path: retrieve + - type: endpoint + key: delete-project-api-key + path: delete + - type: object + key: ProjectApiKey + path: object + + - id: audit-logs + title: Audit Logs + description: | + Logs of user actions and configuration changes within this organization. + + To log events, you must activate logging in the [Organization Settings](/settings/organization/general). + Once activated, for security reasons, logging cannot be deactivated. + navigationGroup: administration + sections: + - type: endpoint + key: list-audit-logs + path: list + - type: object + key: AuditLog + path: object + - id: completions title: Completions legacy: true diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml index a6fd761e..afff8726 100644 --- a/packages/openai_dart/pubspec.yaml +++ b/packages/openai_dart/pubspec.yaml @@ -1,10 +1,10 @@ name: openai_dart -description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.). -version: 0.3.2 +description: Dart client for the OpenAI API. Supports chat (GPT-4o, o1, etc.), completions, embeddings, images (DALL·E 3), assistants (threads,, vector stores, etc.), batch, fine-tuning, etc. 
+version: 0.4.2 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:openai_dart homepage: https://github.com/davidmigloz/langchain_dart -documentation: https://langchaindart.com +documentation: https://langchaindart.dev topics: - ai @@ -13,22 +13,22 @@ topics: - gpt environment: - sdk: ">=3.0.0 <4.0.0" + sdk: ">=3.4.0 <4.0.0" dependencies: - fetch_client: ^1.0.2 - freezed_annotation: ^2.4.1 - http: ^1.1.0 - json_annotation: ^4.8.1 + fetch_client: ^1.1.2 + freezed_annotation: ^2.4.2 + http: ^1.2.2 + json_annotation: ^4.9.0 meta: ^1.11.0 dev_dependencies: - build_runner: ^2.4.9 - freezed: ^2.4.7 - json_serializable: ^6.7.1 + build_runner: ^2.4.11 + freezed: ^2.5.7 + json_serializable: ^6.8.0 # openapi_spec: ^0.7.8 openapi_spec: git: url: https://github.com/davidmigloz/openapi_spec.git - ref: e14c0adaac69e9482e9b57f68fc7964032cdd44c - test: ^1.25.2 + ref: 93230a5e346b02789f0f727da8eecea9c7bdf118 + test: ^1.25.8 diff --git a/packages/openai_dart/test/openai_client_assistants_test.dart b/packages/openai_dart/test/openai_client_assistants_test.dart index cf622c2c..61479182 100644 --- a/packages/openai_dart/test/openai_client_assistants_test.dart +++ b/packages/openai_dart/test/openai_client_assistants_test.dart @@ -8,6 +8,8 @@ import 'package:test/test.dart'; // https://platform.openai.com/docs/assistants/overview void main() { + const defaultModel = 'gpt-4o'; + group('OpenAI Assistants API tests', timeout: const Timeout(Duration(minutes: 5)), () { late OpenAIClient client; @@ -23,14 +25,13 @@ void main() { }); Future createAssistant() async { - const model = 'gpt-4'; const name = 'Math Tutor'; const description = 'Help students with math homework'; const instructions = 'You are a personal math tutor. Write and run code to answer math questions.'; final res = await client.createAssistant( request: const CreateAssistantRequest( - model: AssistantModel.modelId(model), + model: AssistantModel.modelId(defaultModel), name: name, description: description, instructions: instructions, @@ -42,7 +43,7 @@ void main() { expect(res.createdAt, greaterThan(0)); expect(res.name, name); expect(res.description, description); - expect(res.model, model); + expect(res.model, startsWith(defaultModel)); expect(res.instructions, instructions); expect(res.tools, hasLength(1)); final tool = res.tools.first; @@ -114,6 +115,7 @@ void main() { assistantId: assistantId, instructions: 'Please address the user as Jane Doe. 
The user has a premium account.', + model: const CreateRunRequestModel.modelId(defaultModel), ), ); expect(res.id, isNotNull); @@ -129,7 +131,7 @@ void main() { expect(res.cancelledAt, isNull); expect(res.failedAt, isNull); expect(res.completedAt, isNull); - expect(res.model, startsWith('gpt-4')); + expect(res.model, startsWith(defaultModel)); expect(res.instructions, isNotEmpty); expect(res.tools, hasLength(1)); expect(res.metadata, isEmpty); diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart index 9277c848..96c57c2a 100644 --- a/packages/openai_dart/test/openai_client_chat_test.dart +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -23,7 +23,7 @@ void main() { test('Test call chat completion API', () async { final models = [ - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ChatCompletionModels.gpt4, ]; @@ -73,7 +73,7 @@ void main() { test('Test call chat completion API with stop sequence', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -105,7 +105,7 @@ void main() { test('Test call chat completions API with max tokens', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -115,7 +115,7 @@ void main() { content: ChatCompletionUserMessageContent.string('Tell me a joke'), ), ], - maxTokens: 2, + maxCompletionTokens: 2, ); final res = await client.createChatCompletion(request: request); expect(res.choices, isNotEmpty); @@ -123,12 +123,16 @@ void main() { res.choices.first.finishReason, ChatCompletionFinishReason.length, ); + expect( + res.usage?.completionTokensDetails?.reasoningTokens, + ChatCompletionFinishReason.length, + ); }); test('Test call chat completions API with other parameters', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -138,7 +142,7 @@ void main() { content: ChatCompletionUserMessageContent.string('Tell me a joke'), ), ], - maxTokens: 2, + maxCompletionTokens: 2, presencePenalty: 0.6, frequencyPenalty: 0.6, logitBias: {'50256': -100}, @@ -154,7 +158,7 @@ void main() { test('Test call chat completions streaming API', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( @@ -179,7 +183,7 @@ void main() { await for (final res in stream) { expect(res.id, isNotEmpty); expect(res.created, greaterThan(0)); - expect(res.model, startsWith('gpt-3.5-turbo')); + expect(res.model, startsWith('gpt-4o-mini')); expect(res.object, isNotEmpty); if (res.choices.isNotEmpty) { expect(res.choices, hasLength(1)); @@ -224,7 +228,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -272,7 +276,7 @@ void main() { final request2 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const 
ChatCompletionMessage.system( @@ -330,7 +334,7 @@ void main() { final request1 = CreateChatCompletionRequest( model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4oMini, ), messages: [ const ChatCompletionMessage.system( @@ -360,7 +364,7 @@ void main() { res.object, isNotEmpty, ); - expect(res.model, startsWith('gpt-3.5-turbo')); + expect(res.model, startsWith('gpt-4o-mini')); expect(res.choices, hasLength(1)); final choice = res.choices.first; expect(choice.index, 0); @@ -386,113 +390,50 @@ void main() { expect(count, greaterThan(1)); }); - test('Test call chat completions API functions', () async { - const function = FunctionObject( - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA', - }, - 'unit': { - 'type': 'string', - 'description': 'The unit of temperature to return', - 'enum': ['celsius', 'fahrenheit'], - }, - }, - 'required': ['location'], - }, - ); - - final request1 = CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, - ), - messages: [ - const ChatCompletionMessage.system( - content: 'You are a helpful assistant.', - ), - const ChatCompletionMessage.user( - content: ChatCompletionUserMessageContent.string( - 'What’s the weather like in Boston right now?', - ), - ), - ], - functions: [function], - functionCall: ChatCompletionFunctionCall.function( - ChatCompletionFunctionCallOption(name: function.name), - ), - ); - final res1 = await client.createChatCompletion(request: request1); - expect(res1.choices, hasLength(1)); - - final choice1 = res1.choices.first; - - final aiMessage1 = choice1.message; - expect(aiMessage1.role, ChatCompletionMessageRole.assistant); - expect(aiMessage1.content, isNull); - expect(aiMessage1.functionCall, isNotNull); - - final functionCall = aiMessage1.functionCall!; - expect(functionCall.name, function.name); - expect(functionCall.arguments, isNotEmpty); - final arguments = json.decode( - functionCall.arguments, - ) as Map; - expect(arguments.containsKey('location'), isTrue); - expect(arguments['location'], contains('Boston')); - - final functionResult = { - 'temperature': '22', - 'unit': 'celsius', - 'description': 'Sunny', - }; - - final request2 = CreateChatCompletionRequest( - model: const ChatCompletionModel.model( - ChatCompletionModels.gpt35Turbo, + test('Test jsonObject response format', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.model( + ChatCompletionModels.gpt4oMini, ), messages: [ - const ChatCompletionMessage.system( - content: 'You are a helpful assistant.', + ChatCompletionMessage.system( + content: + 'You are a helpful assistant that extracts names from text ' + 'and returns them in a JSON array.', ), - const ChatCompletionMessage.user( + ChatCompletionMessage.user( content: ChatCompletionUserMessageContent.string( - 'What’s the weather like in Boston right now?', + 'John, Mary, and Peter.', ), ), - ChatCompletionMessage.function( - name: function.name, - content: json.encode(functionResult), - ), ], - functions: [function], + temperature: 0, + responseFormat: ResponseFormat.jsonObject(), ); - final res2 = await client.createChatCompletion(request: request2); - expect(res2.choices, hasLength(1)); - - final choice2 = res2.choices.first; - expect(choice2.finishReason, ChatCompletionFinishReason.stop); - 
- final aiMessage2 = choice2.message; - expect(aiMessage2.role, ChatCompletionMessageRole.assistant); - expect(aiMessage2.content, contains('22')); - expect(aiMessage2.functionCall, isNull); + final res = await client.createChatCompletion(request: request); + expect(res.choices, hasLength(1)); + final choice = res.choices.first; + final message = choice.message; + expect(message.role, ChatCompletionMessageRole.assistant); + final content = message.content; + final jsonContent = json.decode(content!) as Map; + final jsonName = jsonContent['names'] as List; + expect(jsonName, isList); + expect(jsonName, hasLength(3)); + expect(jsonName, contains('John')); + expect(jsonName, contains('Mary')); + expect(jsonName, contains('Peter')); }); - test('Test jsonObject response format', () async { + test('Test jsonSchema response format', () async { const request = CreateChatCompletionRequest( model: ChatCompletionModel.model( - ChatCompletionModels.gpt41106Preview, + ChatCompletionModels.gpt4oMini, ), messages: [ ChatCompletionMessage.system( content: - 'You are a helpful assistant. That extracts names from text ' - 'and returns them in a JSON array.', + 'You are a helpful assistant. That extracts names from text.', ), ChatCompletionMessage.user( content: ChatCompletionUserMessageContent.string( @@ -501,8 +442,25 @@ void main() { ), ], temperature: 0, - responseFormat: ChatCompletionResponseFormat( - type: ChatCompletionResponseFormatType.jsonObject, + responseFormat: ResponseFormat.jsonSchema( + jsonSchema: JsonSchemaObject( + name: 'Names', + description: 'A list of names', + strict: true, + schema: { + 'type': 'object', + 'properties': { + 'names': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + }, + }, + 'additionalProperties': false, + 'required': ['names'], + }, + ), ), ); final res = await client.createChatCompletion(request: request); diff --git a/packages/tavily_dart/.gitignore b/packages/tavily_dart/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/tavily_dart/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/tavily_dart/CHANGELOG.md b/packages/tavily_dart/CHANGELOG.md new file mode 100644 index 00000000..74cd20f8 --- /dev/null +++ b/packages/tavily_dart/CHANGELOG.md @@ -0,0 +1,11 @@ +📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details. + +--- + +## 0.1.0 + + - **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047)) + +## 0.0.1-dev.1 + +- Bootstrap package. 
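For reference, the new `jsonSchema` test above exercises the structured-output API added to `openai_dart` 0.4.2 (`ResponseFormat.jsonSchema` replacing the old `ChatCompletionResponseFormat`). The following is a minimal usage sketch outside the test suite; the `OpenAIClient(apiKey: ...)` construction and the environment variable name are illustrative assumptions and are not part of this diff:

```dart
import 'dart:convert';
import 'dart:io';

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Assumption: API key is supplied via an environment variable.
  final client = OpenAIClient(apiKey: Platform.environment['OPENAI_API_KEY']!);

  final res = await client.createChatCompletion(
    request: const CreateChatCompletionRequest(
      model: ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
      messages: [
        ChatCompletionMessage.system(
          content: 'You are a helpful assistant that extracts names from text.',
        ),
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string(
            'John, Mary, and Peter.',
          ),
        ),
      ],
      // Constrain the reply to a JSON object with a `names` string array.
      responseFormat: ResponseFormat.jsonSchema(
        jsonSchema: JsonSchemaObject(
          name: 'Names',
          description: 'A list of names',
          strict: true,
          schema: {
            'type': 'object',
            'properties': {
              'names': {
                'type': 'array',
                'items': {'type': 'string'},
              },
            },
            'additionalProperties': false,
            'required': ['names'],
          },
        ),
      ),
    ),
  );

  // With strict mode, the assistant message content is a JSON document
  // matching the schema, so it can be decoded directly.
  final content = res.choices.first.message.content;
  final names = json.decode(content!)['names'] as List;
  print(names); // e.g. [John, Mary, Peter]
}
```

With `strict: true`, the model's reply is constrained to the supplied schema, which is why the test can decode the content and assert on the `names` array without extra validation.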
diff --git a/packages/tavily_dart/LICENSE b/packages/tavily_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/tavily_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/tavily_dart/README.md b/packages/tavily_dart/README.md new file mode 100644 index 00000000..a7cd6afd --- /dev/null +++ b/packages/tavily_dart/README.md @@ -0,0 +1,131 @@ +# Tavily Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Dart Client for the [Tavily](https://tavily.com) API (a search engine optimized for LLMs and RAG). + +## Features + +- Fully type-safe, [documented](https://pub.dev/documentation/tavily_dart/latest) and tested +- All platforms supported +- Custom base URL, headers and query params support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) + +**Supported endpoints:** +- Search + +## Table of contents + +- [Usage](#usage) + * [Authentication](#authentication) + * [Search](#search) +- [Advance Usage](#advance-usage) + * [Custom HTTP client](#custom-http-client) + * [Using a proxy](#using-a-proxy) + + [HTTP proxy](#http-proxy) + + [SOCKS5 proxy](#socks5-proxy) +- [Acknowledgements](#acknowledgements) +- [License](#license) + +## Usage + +Refer to the [documentation](https://docs.tavily.com) for more information about the API. + +### Authentication + +The Tavily API uses API keys for authentication. Visit the [Tavily console](https://app.tavily.com/) to retrieve the API key you'll use in your requests. + +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. + +```dart +final apiKey = Platform.environment['TAVILY_API_KEY']; +final client = TavilyClient(); +``` + +### Search + +Search for data based on a query. 
+ +**Basic search:** + +```dart +final res = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + ), +); +print(res); +``` + +**Advanced search:** + +```dart +final res = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + searchDepth: SearchRequestSearchDepth.advanced, + ), +); +print(res); +``` + +See the API documentation for more information on all supported search parameters. + +## Advance Usage + +### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = TavilyClient( + client: MyHttpClient(), +); +``` + +### Using a proxy + +#### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = TavilyClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. + +#### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseHttpClient); + +final client = TavilyClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. + +## License + +Tavily Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). diff --git a/packages/tavily_dart/analysis_options.yaml b/packages/tavily_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/tavily_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/tavily_dart/build.yaml b/packages/tavily_dart/build.yaml new file mode 100644 index 00000000..dee719ac --- /dev/null +++ b/packages/tavily_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + - unnecessary_null_checks + json_serializable: + options: + explicit_to_json: true diff --git a/packages/tavily_dart/example/tavily_dart_example.dart b/packages/tavily_dart/example/tavily_dart_example.dart new file mode 100644 index 00000000..652564b2 --- /dev/null +++ b/packages/tavily_dart/example/tavily_dart_example.dart @@ -0,0 +1,28 @@ +// ignore_for_file: avoid_print +import 'dart:io'; + +import 'package:tavily_dart/tavily_dart.dart'; + +void main() async { + final apiKey = Platform.environment['TAVILY_API_KEY']!; + final client = TavilyClient(); + + // Basic search + final res1 = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + ), + ); + print(res1); + + // Advanced search + final res2 = await client.search( + request: SearchRequest( + apiKey: apiKey, + query: 'Should I invest in Apple right now?', + searchDepth: SearchRequestSearchDepth.advanced, + ), + ); + print(res2); +} diff --git a/packages/tavily_dart/lib/src/generated/client.dart b/packages/tavily_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..f6fb0439 --- /dev/null
+++ b/packages/tavily_dart/lib/src/generated/client.dart @@ -0,0 +1,382 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target, unused_import + +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; +import 'package:meta/meta.dart'; + +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: TavilyClientException +// ========================================== + +/// HTTP exception handler for TavilyClient +class TavilyClientException implements Exception { + TavilyClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + final String message; + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'TavilyClientException($s)'; + } +} + +// ========================================== +// CLASS: TavilyClient +// ========================================== + +/// Client for Tavily API (v.1.0.0) +/// +/// Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience. +class TavilyClient { + /// Creates a new TavilyClient instance. + /// + /// - [TavilyClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [TavilyClient.headers] Global headers to be sent with every request + /// - [TavilyClient.queryParams] Global query parameters to be sent with every request + /// - [TavilyClient.client] Override HTTP client to use for requests + TavilyClient({ + this.baseUrl, + this.headers = const {}, + this.queryParams = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? 
baseUrl; + + /// Global headers to be sent with every request + final Map headers; + + /// Global query parameters to be sent with every request + final Map queryParams; + + /// HTTP client for requests + final http.Client client; + + // ------------------------------------------ + // METHOD: endSession + // ------------------------------------------ + + /// Close the HTTP client and end session + void endSession() => client.close(); + + // ------------------------------------------ + // METHOD: onRequest + // ------------------------------------------ + + /// Middleware for HTTP requests (user can override) + /// + /// The request can be of type [http.Request] or [http.MultipartRequest] + Future onRequest(http.BaseRequest request) { + return Future.value(request); + } + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: onResponse + // ------------------------------------------ + + /// Middleware for HTTP responses (user can override) + Future onResponse(http.Response response) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: _jsonDecode + // ------------------------------------------ + + dynamic _jsonDecode(http.Response r) { + return json.decode(utf8.decode(r.bodyBytes)); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request method + @protected + Future _request({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Add global query parameters + queryParams = {...queryParams, ...this.queryParams}; + + // Ensure query parameters are strings or iterable of strings + queryParams = queryParams.map((key, value) { + if (value is Iterable) { + return MapEntry(key, value.map((v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (queryParams.isNotEmpty) { + uri = uri.replace(queryParameters: queryParams); + } + + // Build the headers + Map headers = {...headerParams}; + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + http.BaseRequest request; + if (isMultipart) { + // Handle multipart request + request = http.MultipartRequest(method.name, uri); + request = request as http.MultipartRequest; + if (body is List) { + request.files.addAll(body); + } else { + request.files.add(body as http.MultipartFile); + } + } else { + // Handle normal request + request = http.Request(method.name, uri); + request = request as http.Request; + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw TavilyClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request); + + // Submit request + return await client.send(request); + } + + // ------------------------------------------ + // METHOD: makeRequestStream + // ------------------------------------------ + + /// Reusable request stream method + @protected + Future makeRequestStream({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? 
baseUrl) + path); + late http.StreamedResponse response; + try { + response = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw TavilyClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw TavilyClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } + + // ------------------------------------------ + // METHOD: makeRequest + // ------------------------------------------ + + /// Reusable request method + @protected + Future makeRequest({ + required String baseUrl, + required String path, + required HttpMethod method, + Map queryParams = const {}, + Map headerParams = const {}, + bool isMultipart = false, + String requestType = '', + String responseType = '', + Object? body, + }) async { + final uri = Uri.parse((this.baseUrl ?? baseUrl) + path); + late http.Response response; + try { + final streamedResponse = await _request( + baseUrl: baseUrl, + path: path, + method: method, + queryParams: queryParams, + headerParams: headerParams, + requestType: requestType, + responseType: responseType, + body: body, + ); + response = await http.Response.fromStream(streamedResponse); + // Handle user response middleware + response = await onResponse(response); + } catch (e) { + // Handle request and response errors + throw TavilyClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw TavilyClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: response.body, + ); + } + + // ------------------------------------------ + // METHOD: search + // ------------------------------------------ + + /// Search for data based on a query. + /// + /// `request`: The search request object. 
+ /// + /// `POST` `https://api.tavily.com/search` + Future search({ + required SearchRequest request, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.tavily.com', + path: '/search', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return SearchResponse.fromJson(_jsonDecode(r)); + } +} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.dart b/packages/tavily_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..4b3ba505 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,15 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library tavily_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'search_request.dart'; +part 'search_response.dart'; +part 'search_result.dart'; diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..cc459594 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,1027 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#adding-getters-and-methods-to-our-models'); + +SearchRequest _$SearchRequestFromJson(Map json) { + return _SearchRequest.fromJson(json); +} + +/// @nodoc +mixin _$SearchRequest { + /// Your unique API key. + @JsonKey(name: 'api_key') + String get apiKey => throw _privateConstructorUsedError; + + /// The search query string. + String get query => throw _privateConstructorUsedError; + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + SearchRequestSearchDepth get searchDepth => + throw _privateConstructorUsedError; + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') + bool get includeImages => throw _privateConstructorUsedError; + + /// Include answers in the search results. Default is False. + @JsonKey(name: 'include_answer') + bool get includeAnswer => throw _privateConstructorUsedError; + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + bool get includeRawContent => throw _privateConstructorUsedError; + + /// The number of maximum search results to return. Default is 5. 
+ @JsonKey(name: 'max_results') + int get maxResults => throw _privateConstructorUsedError; + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains => throw _privateConstructorUsedError; + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? get excludeDomains => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchRequestCopyWith<$Res> { + factory $SearchRequestCopyWith( + SearchRequest value, $Res Function(SearchRequest) then) = + _$SearchRequestCopyWithImpl<$Res, SearchRequest>; + @useResult + $Res call( + {@JsonKey(name: 'api_key') String apiKey, + String query, + @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') bool includeImages, + @JsonKey(name: 'include_answer') bool includeAnswer, + @JsonKey(name: 'include_raw_content') bool includeRawContent, + @JsonKey(name: 'max_results') int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? excludeDomains}); +} + +/// @nodoc +class _$SearchRequestCopyWithImpl<$Res, $Val extends SearchRequest> + implements $SearchRequestCopyWith<$Res> { + _$SearchRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? apiKey = null, + Object? query = null, + Object? searchDepth = null, + Object? includeImages = null, + Object? includeAnswer = null, + Object? includeRawContent = null, + Object? maxResults = null, + Object? includeDomains = freezed, + Object? excludeDomains = freezed, + }) { + return _then(_value.copyWith( + apiKey: null == apiKey + ? _value.apiKey + : apiKey // ignore: cast_nullable_to_non_nullable + as String, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + searchDepth: null == searchDepth + ? _value.searchDepth + : searchDepth // ignore: cast_nullable_to_non_nullable + as SearchRequestSearchDepth, + includeImages: null == includeImages + ? _value.includeImages + : includeImages // ignore: cast_nullable_to_non_nullable + as bool, + includeAnswer: null == includeAnswer + ? _value.includeAnswer + : includeAnswer // ignore: cast_nullable_to_non_nullable + as bool, + includeRawContent: null == includeRawContent + ? _value.includeRawContent + : includeRawContent // ignore: cast_nullable_to_non_nullable + as bool, + maxResults: null == maxResults + ? _value.maxResults + : maxResults // ignore: cast_nullable_to_non_nullable + as int, + includeDomains: freezed == includeDomains + ? _value.includeDomains + : includeDomains // ignore: cast_nullable_to_non_nullable + as List?, + excludeDomains: freezed == excludeDomains + ? 
_value.excludeDomains + : excludeDomains // ignore: cast_nullable_to_non_nullable + as List?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchRequestImplCopyWith<$Res> + implements $SearchRequestCopyWith<$Res> { + factory _$$SearchRequestImplCopyWith( + _$SearchRequestImpl value, $Res Function(_$SearchRequestImpl) then) = + __$$SearchRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'api_key') String apiKey, + String query, + @JsonKey(name: 'search_depth') SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') bool includeImages, + @JsonKey(name: 'include_answer') bool includeAnswer, + @JsonKey(name: 'include_raw_content') bool includeRawContent, + @JsonKey(name: 'max_results') int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? excludeDomains}); +} + +/// @nodoc +class __$$SearchRequestImplCopyWithImpl<$Res> + extends _$SearchRequestCopyWithImpl<$Res, _$SearchRequestImpl> + implements _$$SearchRequestImplCopyWith<$Res> { + __$$SearchRequestImplCopyWithImpl( + _$SearchRequestImpl _value, $Res Function(_$SearchRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? apiKey = null, + Object? query = null, + Object? searchDepth = null, + Object? includeImages = null, + Object? includeAnswer = null, + Object? includeRawContent = null, + Object? maxResults = null, + Object? includeDomains = freezed, + Object? excludeDomains = freezed, + }) { + return _then(_$SearchRequestImpl( + apiKey: null == apiKey + ? _value.apiKey + : apiKey // ignore: cast_nullable_to_non_nullable + as String, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + searchDepth: null == searchDepth + ? _value.searchDepth + : searchDepth // ignore: cast_nullable_to_non_nullable + as SearchRequestSearchDepth, + includeImages: null == includeImages + ? _value.includeImages + : includeImages // ignore: cast_nullable_to_non_nullable + as bool, + includeAnswer: null == includeAnswer + ? _value.includeAnswer + : includeAnswer // ignore: cast_nullable_to_non_nullable + as bool, + includeRawContent: null == includeRawContent + ? _value.includeRawContent + : includeRawContent // ignore: cast_nullable_to_non_nullable + as bool, + maxResults: null == maxResults + ? _value.maxResults + : maxResults // ignore: cast_nullable_to_non_nullable + as int, + includeDomains: freezed == includeDomains + ? _value._includeDomains + : includeDomains // ignore: cast_nullable_to_non_nullable + as List?, + excludeDomains: freezed == excludeDomains + ? _value._excludeDomains + : excludeDomains // ignore: cast_nullable_to_non_nullable + as List?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchRequestImpl extends _SearchRequest { + const _$SearchRequestImpl( + {@JsonKey(name: 'api_key') required this.apiKey, + required this.query, + @JsonKey(name: 'search_depth') + this.searchDepth = SearchRequestSearchDepth.basic, + @JsonKey(name: 'include_images') this.includeImages = false, + @JsonKey(name: 'include_answer') this.includeAnswer = false, + @JsonKey(name: 'include_raw_content') this.includeRawContent = false, + @JsonKey(name: 'max_results') this.maxResults = 5, + @JsonKey(name: 'include_domains', includeIfNull: false) + final List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + final List? 
excludeDomains}) + : _includeDomains = includeDomains, + _excludeDomains = excludeDomains, + super._(); + + factory _$SearchRequestImpl.fromJson(Map json) => + _$$SearchRequestImplFromJson(json); + + /// Your unique API key. + @override + @JsonKey(name: 'api_key') + final String apiKey; + + /// The search query string. + @override + final String query; + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @override + @JsonKey(name: 'search_depth') + final SearchRequestSearchDepth searchDepth; + + /// Include a list of query related images in the response. Default is False. + @override + @JsonKey(name: 'include_images') + final bool includeImages; + + /// Include answers in the search results. Default is False. + @override + @JsonKey(name: 'include_answer') + final bool includeAnswer; + + /// Include raw content in the search results. Default is False. + @override + @JsonKey(name: 'include_raw_content') + final bool includeRawContent; + + /// The number of maximum search results to return. Default is 5. + @override + @JsonKey(name: 'max_results') + final int maxResults; + + /// A list of domains to specifically include in the search results. Default is None. + final List? _includeDomains; + + /// A list of domains to specifically include in the search results. Default is None. + @override + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains { + final value = _includeDomains; + if (value == null) return null; + if (_includeDomains is EqualUnmodifiableListView) return _includeDomains; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of domains to specifically exclude from the search results. Default is None. + final List? _excludeDomains; + + /// A list of domains to specifically exclude from the search results. Default is None. + @override + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? 
get excludeDomains { + final value = _excludeDomains; + if (value == null) return null; + if (_excludeDomains is EqualUnmodifiableListView) return _excludeDomains; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'SearchRequest(apiKey: $apiKey, query: $query, searchDepth: $searchDepth, includeImages: $includeImages, includeAnswer: $includeAnswer, includeRawContent: $includeRawContent, maxResults: $maxResults, includeDomains: $includeDomains, excludeDomains: $excludeDomains)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchRequestImpl && + (identical(other.apiKey, apiKey) || other.apiKey == apiKey) && + (identical(other.query, query) || other.query == query) && + (identical(other.searchDepth, searchDepth) || + other.searchDepth == searchDepth) && + (identical(other.includeImages, includeImages) || + other.includeImages == includeImages) && + (identical(other.includeAnswer, includeAnswer) || + other.includeAnswer == includeAnswer) && + (identical(other.includeRawContent, includeRawContent) || + other.includeRawContent == includeRawContent) && + (identical(other.maxResults, maxResults) || + other.maxResults == maxResults) && + const DeepCollectionEquality() + .equals(other._includeDomains, _includeDomains) && + const DeepCollectionEquality() + .equals(other._excludeDomains, _excludeDomains)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + apiKey, + query, + searchDepth, + includeImages, + includeAnswer, + includeRawContent, + maxResults, + const DeepCollectionEquality().hash(_includeDomains), + const DeepCollectionEquality().hash(_excludeDomains)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => + __$$SearchRequestImplCopyWithImpl<_$SearchRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$SearchRequestImplToJson( + this, + ); + } +} + +abstract class _SearchRequest extends SearchRequest { + const factory _SearchRequest( + {@JsonKey(name: 'api_key') required final String apiKey, + required final String query, + @JsonKey(name: 'search_depth') final SearchRequestSearchDepth searchDepth, + @JsonKey(name: 'include_images') final bool includeImages, + @JsonKey(name: 'include_answer') final bool includeAnswer, + @JsonKey(name: 'include_raw_content') final bool includeRawContent, + @JsonKey(name: 'max_results') final int maxResults, + @JsonKey(name: 'include_domains', includeIfNull: false) + final List? includeDomains, + @JsonKey(name: 'exclude_domains', includeIfNull: false) + final List? excludeDomains}) = _$SearchRequestImpl; + const _SearchRequest._() : super._(); + + factory _SearchRequest.fromJson(Map json) = + _$SearchRequestImpl.fromJson; + + @override + + /// Your unique API key. + @JsonKey(name: 'api_key') + String get apiKey; + @override + + /// The search query string. + String get query; + @override + + /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'. + @JsonKey(name: 'search_depth') + SearchRequestSearchDepth get searchDepth; + @override + + /// Include a list of query related images in the response. Default is False. + @JsonKey(name: 'include_images') + bool get includeImages; + @override + + /// Include answers in the search results. Default is False. 
+ @JsonKey(name: 'include_answer') + bool get includeAnswer; + @override + + /// Include raw content in the search results. Default is False. + @JsonKey(name: 'include_raw_content') + bool get includeRawContent; + @override + + /// The number of maximum search results to return. Default is 5. + @JsonKey(name: 'max_results') + int get maxResults; + @override + + /// A list of domains to specifically include in the search results. Default is None. + @JsonKey(name: 'include_domains', includeIfNull: false) + List? get includeDomains; + @override + + /// A list of domains to specifically exclude from the search results. Default is None. + @JsonKey(name: 'exclude_domains', includeIfNull: false) + List? get excludeDomains; + @override + @JsonKey(ignore: true) + _$$SearchRequestImplCopyWith<_$SearchRequestImpl> get copyWith => + throw _privateConstructorUsedError; +} + +SearchResponse _$SearchResponseFromJson(Map json) { + return _SearchResponse.fromJson(json); +} + +/// @nodoc +mixin _$SearchResponse { + /// The answer to your search query. + @JsonKey(includeIfNull: false) + String? get answer => throw _privateConstructorUsedError; + + /// Your search query. + String get query => throw _privateConstructorUsedError; + + /// Your search result response time. + @JsonKey(name: 'response_time') + double get responseTime => throw _privateConstructorUsedError; + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) + List? get images => throw _privateConstructorUsedError; + + /// A list of suggested research follow up questions related to original query. + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions => throw _privateConstructorUsedError; + + /// A list of search results. + List get results => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchResponseCopyWith<$Res> { + factory $SearchResponseCopyWith( + SearchResponse value, $Res Function(SearchResponse) then) = + _$SearchResponseCopyWithImpl<$Res, SearchResponse>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? answer, + String query, + @JsonKey(name: 'response_time') double responseTime, + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + List results}); +} + +/// @nodoc +class _$SearchResponseCopyWithImpl<$Res, $Val extends SearchResponse> + implements $SearchResponseCopyWith<$Res> { + _$SearchResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? answer = freezed, + Object? query = null, + Object? responseTime = null, + Object? images = freezed, + Object? followUpQuestions = freezed, + Object? results = null, + }) { + return _then(_value.copyWith( + answer: freezed == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as String?, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + responseTime: null == responseTime + ? _value.responseTime + : responseTime // ignore: cast_nullable_to_non_nullable + as double, + images: freezed == images + ? 
_value.images + : images // ignore: cast_nullable_to_non_nullable + as List?, + followUpQuestions: freezed == followUpQuestions + ? _value.followUpQuestions + : followUpQuestions // ignore: cast_nullable_to_non_nullable + as List?, + results: null == results + ? _value.results + : results // ignore: cast_nullable_to_non_nullable + as List, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchResponseImplCopyWith<$Res> + implements $SearchResponseCopyWith<$Res> { + factory _$$SearchResponseImplCopyWith(_$SearchResponseImpl value, + $Res Function(_$SearchResponseImpl) then) = + __$$SearchResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? answer, + String query, + @JsonKey(name: 'response_time') double responseTime, + @JsonKey(includeIfNull: false) List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? followUpQuestions, + List results}); +} + +/// @nodoc +class __$$SearchResponseImplCopyWithImpl<$Res> + extends _$SearchResponseCopyWithImpl<$Res, _$SearchResponseImpl> + implements _$$SearchResponseImplCopyWith<$Res> { + __$$SearchResponseImplCopyWithImpl( + _$SearchResponseImpl _value, $Res Function(_$SearchResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? answer = freezed, + Object? query = null, + Object? responseTime = null, + Object? images = freezed, + Object? followUpQuestions = freezed, + Object? results = null, + }) { + return _then(_$SearchResponseImpl( + answer: freezed == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as String?, + query: null == query + ? _value.query + : query // ignore: cast_nullable_to_non_nullable + as String, + responseTime: null == responseTime + ? _value.responseTime + : responseTime // ignore: cast_nullable_to_non_nullable + as double, + images: freezed == images + ? _value._images + : images // ignore: cast_nullable_to_non_nullable + as List?, + followUpQuestions: freezed == followUpQuestions + ? _value._followUpQuestions + : followUpQuestions // ignore: cast_nullable_to_non_nullable + as List?, + results: null == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchResponseImpl extends _SearchResponse { + const _$SearchResponseImpl( + {@JsonKey(includeIfNull: false) this.answer, + required this.query, + @JsonKey(name: 'response_time') required this.responseTime, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + final List? followUpQuestions, + required final List results}) + : _images = images, + _followUpQuestions = followUpQuestions, + _results = results, + super._(); + + factory _$SearchResponseImpl.fromJson(Map json) => + _$$SearchResponseImplFromJson(json); + + /// The answer to your search query. + @override + @JsonKey(includeIfNull: false) + final String? answer; + + /// Your search query. + @override + final String query; + + /// Your search result response time. + @override + @JsonKey(name: 'response_time') + final double responseTime; + + /// A list of query related image urls. + final List? _images; + + /// A list of query related image urls. + @override + @JsonKey(includeIfNull: false) + List? 
get images { + final value = _images; + if (value == null) return null; + if (_images is EqualUnmodifiableListView) return _images; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of suggested research follow up questions related to original query. + final List? _followUpQuestions; + + /// A list of suggested research follow up questions related to original query. + @override + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions { + final value = _followUpQuestions; + if (value == null) return null; + if (_followUpQuestions is EqualUnmodifiableListView) + return _followUpQuestions; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// A list of search results. + final List _results; + + /// A list of search results. + @override + List get results { + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_results); + } + + @override + String toString() { + return 'SearchResponse(answer: $answer, query: $query, responseTime: $responseTime, images: $images, followUpQuestions: $followUpQuestions, results: $results)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchResponseImpl && + (identical(other.answer, answer) || other.answer == answer) && + (identical(other.query, query) || other.query == query) && + (identical(other.responseTime, responseTime) || + other.responseTime == responseTime) && + const DeepCollectionEquality().equals(other._images, _images) && + const DeepCollectionEquality() + .equals(other._followUpQuestions, _followUpQuestions) && + const DeepCollectionEquality().equals(other._results, _results)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + answer, + query, + responseTime, + const DeepCollectionEquality().hash(_images), + const DeepCollectionEquality().hash(_followUpQuestions), + const DeepCollectionEquality().hash(_results)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => + __$$SearchResponseImplCopyWithImpl<_$SearchResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$SearchResponseImplToJson( + this, + ); + } +} + +abstract class _SearchResponse extends SearchResponse { + const factory _SearchResponse( + {@JsonKey(includeIfNull: false) final String? answer, + required final String query, + @JsonKey(name: 'response_time') required final double responseTime, + @JsonKey(includeIfNull: false) final List? images, + @JsonKey(name: 'follow_up_questions', includeIfNull: false) + final List? followUpQuestions, + required final List results}) = _$SearchResponseImpl; + const _SearchResponse._() : super._(); + + factory _SearchResponse.fromJson(Map json) = + _$SearchResponseImpl.fromJson; + + @override + + /// The answer to your search query. + @JsonKey(includeIfNull: false) + String? get answer; + @override + + /// Your search query. + String get query; + @override + + /// Your search result response time. + @JsonKey(name: 'response_time') + double get responseTime; + @override + + /// A list of query related image urls. + @JsonKey(includeIfNull: false) + List? get images; + @override + + /// A list of suggested research follow up questions related to original query. 
+ @JsonKey(name: 'follow_up_questions', includeIfNull: false) + List? get followUpQuestions; + @override + + /// A list of search results. + List get results; + @override + @JsonKey(ignore: true) + _$$SearchResponseImplCopyWith<_$SearchResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +SearchResult _$SearchResultFromJson(Map json) { + return _SearchResult.fromJson(json); +} + +/// @nodoc +mixin _$SearchResult { + /// The title of the search result url. + String get title => throw _privateConstructorUsedError; + + /// The url of the search result. + String get url => throw _privateConstructorUsedError; + + /// The most query related content from the scraped url. + String get content => throw _privateConstructorUsedError; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) + String? get rawContent => throw _privateConstructorUsedError; + + /// The relevance score of the search result. + double get score => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $SearchResultCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $SearchResultCopyWith<$Res> { + factory $SearchResultCopyWith( + SearchResult value, $Res Function(SearchResult) then) = + _$SearchResultCopyWithImpl<$Res, SearchResult>; + @useResult + $Res call( + {String title, + String url, + String content, + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + double score}); +} + +/// @nodoc +class _$SearchResultCopyWithImpl<$Res, $Val extends SearchResult> + implements $SearchResultCopyWith<$Res> { + _$SearchResultCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? title = null, + Object? url = null, + Object? content = null, + Object? rawContent = freezed, + Object? score = null, + }) { + return _then(_value.copyWith( + title: null == title + ? _value.title + : title // ignore: cast_nullable_to_non_nullable + as String, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + rawContent: freezed == rawContent + ? _value.rawContent + : rawContent // ignore: cast_nullable_to_non_nullable + as String?, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$SearchResultImplCopyWith<$Res> + implements $SearchResultCopyWith<$Res> { + factory _$$SearchResultImplCopyWith( + _$SearchResultImpl value, $Res Function(_$SearchResultImpl) then) = + __$$SearchResultImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String title, + String url, + String content, + @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent, + double score}); +} + +/// @nodoc +class __$$SearchResultImplCopyWithImpl<$Res> + extends _$SearchResultCopyWithImpl<$Res, _$SearchResultImpl> + implements _$$SearchResultImplCopyWith<$Res> { + __$$SearchResultImplCopyWithImpl( + _$SearchResultImpl _value, $Res Function(_$SearchResultImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? title = null, + Object? url = null, + Object? 
content = null, + Object? rawContent = freezed, + Object? score = null, + }) { + return _then(_$SearchResultImpl( + title: null == title + ? _value.title + : title // ignore: cast_nullable_to_non_nullable + as String, + url: null == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String, + content: null == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String, + rawContent: freezed == rawContent + ? _value.rawContent + : rawContent // ignore: cast_nullable_to_non_nullable + as String?, + score: null == score + ? _value.score + : score // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$SearchResultImpl extends _SearchResult { + const _$SearchResultImpl( + {required this.title, + required this.url, + required this.content, + @JsonKey(name: 'raw_content', includeIfNull: false) this.rawContent, + required this.score}) + : super._(); + + factory _$SearchResultImpl.fromJson(Map json) => + _$$SearchResultImplFromJson(json); + + /// The title of the search result url. + @override + final String title; + + /// The url of the search result. + @override + final String url; + + /// The most query related content from the scraped url. + @override + final String content; + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @override + @JsonKey(name: 'raw_content', includeIfNull: false) + final String? rawContent; + + /// The relevance score of the search result. + @override + final double score; + + @override + String toString() { + return 'SearchResult(title: $title, url: $url, content: $content, rawContent: $rawContent, score: $score)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$SearchResultImpl && + (identical(other.title, title) || other.title == title) && + (identical(other.url, url) || other.url == url) && + (identical(other.content, content) || other.content == content) && + (identical(other.rawContent, rawContent) || + other.rawContent == rawContent) && + (identical(other.score, score) || other.score == score)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, title, url, content, rawContent, score); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => + __$$SearchResultImplCopyWithImpl<_$SearchResultImpl>(this, _$identity); + + @override + Map toJson() { + return _$$SearchResultImplToJson( + this, + ); + } +} + +abstract class _SearchResult extends SearchResult { + const factory _SearchResult( + {required final String title, + required final String url, + required final String content, + @JsonKey(name: 'raw_content', includeIfNull: false) + final String? rawContent, + required final double score}) = _$SearchResultImpl; + const _SearchResult._() : super._(); + + factory _SearchResult.fromJson(Map json) = + _$SearchResultImpl.fromJson; + + @override + + /// The title of the search result url. + String get title; + @override + + /// The url of the search result. + String get url; + @override + + /// The most query related content from the scraped url. + String get content; + @override + + /// The parsed and cleaned HTML of the site. For now includes parsed text only. + @JsonKey(name: 'raw_content', includeIfNull: false) + String? get rawContent; + @override + + /// The relevance score of the search result. 
+ double get score; + @override + @JsonKey(ignore: true) + _$$SearchResultImplCopyWith<_$SearchResultImpl> get copyWith => + throw _privateConstructorUsedError; +} diff --git a/packages/tavily_dart/lib/src/generated/schema/schema.g.dart b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..f9214d02 --- /dev/null +++ b/packages/tavily_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,116 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names, unnecessary_null_checks + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$SearchRequestImpl _$$SearchRequestImplFromJson(Map json) => + _$SearchRequestImpl( + apiKey: json['api_key'] as String, + query: json['query'] as String, + searchDepth: $enumDecodeNullable( + _$SearchRequestSearchDepthEnumMap, json['search_depth']) ?? + SearchRequestSearchDepth.basic, + includeImages: json['include_images'] as bool? ?? false, + includeAnswer: json['include_answer'] as bool? ?? false, + includeRawContent: json['include_raw_content'] as bool? ?? false, + maxResults: (json['max_results'] as num?)?.toInt() ?? 5, + includeDomains: (json['include_domains'] as List?) + ?.map((e) => e as String) + .toList(), + excludeDomains: (json['exclude_domains'] as List?) + ?.map((e) => e as String) + .toList(), + ); + +Map _$$SearchRequestImplToJson(_$SearchRequestImpl instance) { + final val = { + 'api_key': instance.apiKey, + 'query': instance.query, + 'search_depth': _$SearchRequestSearchDepthEnumMap[instance.searchDepth]!, + 'include_images': instance.includeImages, + 'include_answer': instance.includeAnswer, + 'include_raw_content': instance.includeRawContent, + 'max_results': instance.maxResults, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('include_domains', instance.includeDomains); + writeNotNull('exclude_domains', instance.excludeDomains); + return val; +} + +const _$SearchRequestSearchDepthEnumMap = { + SearchRequestSearchDepth.basic: 'basic', + SearchRequestSearchDepth.advanced: 'advanced', +}; + +_$SearchResponseImpl _$$SearchResponseImplFromJson(Map json) => + _$SearchResponseImpl( + answer: json['answer'] as String?, + query: json['query'] as String, + responseTime: (json['response_time'] as num).toDouble(), + images: + (json['images'] as List?)?.map((e) => e as String).toList(), + followUpQuestions: (json['follow_up_questions'] as List?) 
+          ?.map((e) => e as String)
+          .toList(),
+      results: (json['results'] as List<dynamic>)
+          .map((e) => SearchResult.fromJson(e as Map<String, dynamic>))
+          .toList(),
+    );
+
+Map<String, dynamic> _$$SearchResponseImplToJson(
+    _$SearchResponseImpl instance) {
+  final val = <String, dynamic>{};
+
+  void writeNotNull(String key, dynamic value) {
+    if (value != null) {
+      val[key] = value;
+    }
+  }
+
+  writeNotNull('answer', instance.answer);
+  val['query'] = instance.query;
+  val['response_time'] = instance.responseTime;
+  writeNotNull('images', instance.images);
+  writeNotNull('follow_up_questions', instance.followUpQuestions);
+  val['results'] = instance.results.map((e) => e.toJson()).toList();
+  return val;
+}
+
+_$SearchResultImpl _$$SearchResultImplFromJson(Map<String, dynamic> json) =>
+    _$SearchResultImpl(
+      title: json['title'] as String,
+      url: json['url'] as String,
+      content: json['content'] as String,
+      rawContent: json['raw_content'] as String?,
+      score: (json['score'] as num).toDouble(),
+    );
+
+Map<String, dynamic> _$$SearchResultImplToJson(_$SearchResultImpl instance) {
+  final val = <String, dynamic>{
+    'title': instance.title,
+    'url': instance.url,
+    'content': instance.content,
+  };
+
+  void writeNotNull(String key, dynamic value) {
+    if (value != null) {
+      val[key] = value;
+    }
+  }
+
+  writeNotNull('raw_content', instance.rawContent);
+  val['score'] = instance.score;
+  return val;
+}
diff --git a/packages/tavily_dart/lib/src/generated/schema/search_request.dart b/packages/tavily_dart/lib/src/generated/schema/search_request.dart
new file mode 100644
index 00000000..c0d16e7a
--- /dev/null
+++ b/packages/tavily_dart/lib/src/generated/schema/search_request.dart
@@ -0,0 +1,103 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of tavily_schema;
+
+// ==========================================
+// CLASS: SearchRequest
+// ==========================================
+
+/// The search request object.
+@freezed
+class SearchRequest with _$SearchRequest {
+  const SearchRequest._();
+
+  /// Factory constructor for SearchRequest
+  const factory SearchRequest({
+    /// Your unique API key.
+    @JsonKey(name: 'api_key') required String apiKey,
+
+    /// The search query string.
+    required String query,
+
+    /// The depth of the search. It can be 'basic' or advanced. Default is 'basic'.
+    @JsonKey(name: 'search_depth')
+    @Default(SearchRequestSearchDepth.basic)
+    SearchRequestSearchDepth searchDepth,
+
+    /// Include a list of query related images in the response. Default is False.
+    @JsonKey(name: 'include_images') @Default(false) bool includeImages,
+
+    /// Include answers in the search results. Default is False.
+    @JsonKey(name: 'include_answer') @Default(false) bool includeAnswer,
+
+    /// Include raw content in the search results. Default is False.
+    @JsonKey(name: 'include_raw_content')
+    @Default(false)
+    bool includeRawContent,
+
+    /// The number of maximum search results to return. Default is 5.
+    @JsonKey(name: 'max_results') @Default(5) int maxResults,
+
+    /// A list of domains to specifically include in the search results. Default is None.
+    @JsonKey(name: 'include_domains', includeIfNull: false)
+    List<String>? includeDomains,
+
+    /// A list of domains to specifically exclude from the search results. Default is None.
+    @JsonKey(name: 'exclude_domains', includeIfNull: false)
+    List<String>? excludeDomains,
+  }) = _SearchRequest;
+
+  /// Object construction from a JSON representation
+  factory SearchRequest.fromJson(Map<String, dynamic> json) =>
+      _$SearchRequestFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'api_key',
+    'query',
+    'search_depth',
+    'include_images',
+    'include_answer',
+    'include_raw_content',
+    'max_results',
+    'include_domains',
+    'exclude_domains'
+  ];
+
+  /// Validation constants
+  static const maxResultsDefaultValue = 5;
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'api_key': apiKey,
+      'query': query,
+      'search_depth': searchDepth,
+      'include_images': includeImages,
+      'include_answer': includeAnswer,
+      'include_raw_content': includeRawContent,
+      'max_results': maxResults,
+      'include_domains': includeDomains,
+      'exclude_domains': excludeDomains,
+    };
+  }
+}
+
+// ==========================================
+// ENUM: SearchRequestSearchDepth
+// ==========================================
+
+/// The depth of the search. It can be 'basic' or advanced. Default is 'basic'.
+enum SearchRequestSearchDepth {
+  @JsonValue('basic')
+  basic,
+  @JsonValue('advanced')
+  advanced,
+}
diff --git a/packages/tavily_dart/lib/src/generated/schema/search_response.dart b/packages/tavily_dart/lib/src/generated/schema/search_response.dart
new file mode 100644
index 00000000..473db9c1
--- /dev/null
+++ b/packages/tavily_dart/lib/src/generated/schema/search_response.dart
@@ -0,0 +1,68 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of tavily_schema;
+
+// ==========================================
+// CLASS: SearchResponse
+// ==========================================
+
+/// The response data from the search query.
+@freezed
+class SearchResponse with _$SearchResponse {
+  const SearchResponse._();
+
+  /// Factory constructor for SearchResponse
+  const factory SearchResponse({
+    /// The answer to your search query.
+    @JsonKey(includeIfNull: false) String? answer,
+
+    /// Your search query.
+    required String query,
+
+    /// Your search result response time.
+    @JsonKey(name: 'response_time') required double responseTime,
+
+    /// A list of query related image urls.
+    @JsonKey(includeIfNull: false) List<String>? images,
+
+    /// A list of suggested research follow up questions related to original query.
+    @JsonKey(name: 'follow_up_questions', includeIfNull: false)
+    List<String>? followUpQuestions,
+
+    /// A list of search results.
+    required List<SearchResult> results,
+  }) = _SearchResponse;
+
+  /// Object construction from a JSON representation
+  factory SearchResponse.fromJson(Map<String, dynamic> json) =>
+      _$SearchResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'answer',
+    'query',
+    'response_time',
+    'images',
+    'follow_up_questions',
+    'results'
+  ];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'answer': answer,
+      'query': query,
+      'response_time': responseTime,
+      'images': images,
+      'follow_up_questions': followUpQuestions,
+      'results': results,
+    };
+  }
+}
diff --git a/packages/tavily_dart/lib/src/generated/schema/search_result.dart b/packages/tavily_dart/lib/src/generated/schema/search_result.dart
new file mode 100644
index 00000000..cfb75690
--- /dev/null
+++ b/packages/tavily_dart/lib/src/generated/schema/search_result.dart
@@ -0,0 +1,62 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of tavily_schema;
+
+// ==========================================
+// CLASS: SearchResult
+// ==========================================
+
+/// The search result object.
+@freezed
+class SearchResult with _$SearchResult {
+  const SearchResult._();
+
+  /// Factory constructor for SearchResult
+  const factory SearchResult({
+    /// The title of the search result url.
+    required String title,
+
+    /// The url of the search result.
+    required String url,
+
+    /// The most query related content from the scraped url.
+    required String content,
+
+    /// The parsed and cleaned HTML of the site. For now includes parsed text only.
+    @JsonKey(name: 'raw_content', includeIfNull: false) String? rawContent,
+
+    /// The relevance score of the search result.
+    required double score,
+  }) = _SearchResult;
+
+  /// Object construction from a JSON representation
+  factory SearchResult.fromJson(Map<String, dynamic> json) =>
+      _$SearchResultFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'title',
+    'url',
+    'content',
+    'raw_content',
+    'score'
+  ];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'title': title,
+      'url': url,
+      'content': content,
+      'raw_content': rawContent,
+      'score': score,
+    };
+  }
+}
diff --git a/packages/tavily_dart/lib/tavily_dart.dart b/packages/tavily_dart/lib/tavily_dart.dart
new file mode 100644
index 00000000..272b33ce
--- /dev/null
+++ b/packages/tavily_dart/lib/tavily_dart.dart
@@ -0,0 +1,5 @@
+/// Dart Client for the Tavily API (a search engine optimized for LLMs and RAG).
+library;
+
+export 'src/generated/client.dart';
+export 'src/generated/schema/schema.dart';
diff --git a/packages/tavily_dart/oas/main.dart b/packages/tavily_dart/oas/main.dart
new file mode 100644
index 00000000..bf08264b
--- /dev/null
+++ b/packages/tavily_dart/oas/main.dart
@@ -0,0 +1,23 @@
+import 'dart:io';
+
+import 'package:openapi_spec/openapi_spec.dart';
+
+/// Generates Tavily API client Dart code from the OpenAPI spec.
+/// https://docs.tavily.com/docs/tavily-api/rest_api
+void main() async {
+  final spec = OpenApi.fromFile(source: 'oas/tavily_openapi.yaml');
+
+  await spec.generate(
+    package: 'Tavily',
+    destination: 'lib/src/generated/',
+    replace: true,
+    clientOptions: const ClientGeneratorOptions(
+      enabled: true,
+    ),
+  );
+
+  await Process.run(
+    'dart',
+    ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'],
+  );
+}
diff --git a/packages/tavily_dart/oas/tavily_openapi.yaml b/packages/tavily_dart/oas/tavily_openapi.yaml
new file mode 100644
index 00000000..250fa447
--- /dev/null
+++ b/packages/tavily_dart/oas/tavily_openapi.yaml
@@ -0,0 +1,156 @@
+openapi: 3.0.3
+
+info:
+  title: Tavily API
+  description: Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.
+  version: 1.0.0
+  contact:
+    name: Tavily Support
+    url: https://tavily.com
+
+servers:
+  - url: https://api.tavily.com
+
+paths:
+  /search:
+    post:
+      summary: Search for data based on a query.
+      operationId: search
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/SearchRequest'
+      responses:
+        '200':
+          description: Successful search response.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/SearchResponse'
+        '400':
+          description: Bad Request — Your request is invalid.
+        '401':
+          description: Unauthorized — Your API key is wrong.
+        '403':
+          description: Forbidden — The endpoint requested is hidden for administrators only.
+        '404':
+          description: Not Found — The specified endpoint could not be found.
+        '405':
+          description: Method Not Allowed — You tried to access an endpoint with an invalid method.
+        '429':
+          description: Too Many Requests — You're requesting too many results! Slow down!
+        '500':
+          description: Internal Server Error — We had a problem with our server. Try again later.
+        '503':
+          description: Service Unavailable — We're temporarily offline for maintenance. Please try again later.
+        '504':
+          description: Gateway Timeout — We're temporarily offline for maintenance. Please try again later.
+
+components:
+  schemas:
+    SearchRequest:
+      type: object
+      description: The search request object.
+      properties:
+        api_key:
+          type: string
+          description: Your unique API key.
+          example: "your api key"
+        query:
+          type: string
+          description: The search query string.
+          example: "your search query"
+        search_depth:
+          type: string
+          description: The depth of the search. It can be 'basic' or advanced. Default is 'basic'.
+          enum:
+            - basic
+            - advanced
+          default: basic
+        include_images:
+          type: boolean
+          description: Include a list of query related images in the response. Default is False.
+          default: false
+        include_answer:
+          type: boolean
+          description: Include answers in the search results. Default is False.
+          default: false
+        include_raw_content:
+          type: boolean
+          description: Include raw content in the search results. Default is False.
+          default: false
+        max_results:
+          type: integer
+          description: The number of maximum search results to return. Default is 5.
+          default: 5
+        include_domains:
+          type: array
+          items:
+            type: string
+          description: A list of domains to specifically include in the search results. Default is None.
+        exclude_domains:
+          type: array
+          items:
+            type: string
+          description: A list of domains to specifically exclude from the search results. Default is None.
+      required:
+        - api_key
+        - query
+    SearchResponse:
+      type: object
+      description: The response data from the search query.
+      properties:
+        answer:
+          type: string
+          description: The answer to your search query.
+        query:
+          type: string
+          description: Your search query.
+        response_time:
+          type: number
+          description: Your search result response time.
+        images:
+          type: array
+          items:
+            type: string
+          description: A list of query related image urls.
+        follow_up_questions:
+          type: array
+          items:
+            type: string
+          description: A list of suggested research follow up questions related to original query.
+        results:
+          type: array
+          description: A list of search results.
+          items:
+            $ref: '#/components/schemas/SearchResult'
+      required:
+        - query
+        - response_time
+        - results
+    SearchResult:
+      type: object
+      description: The search result object.
+      properties:
+        title:
+          type: string
+          description: The title of the search result url.
+        url:
+          type: string
+          description: The url of the search result.
+        content:
+          type: string
+          description: The most query related content from the scraped url.
+        raw_content:
+          type: string
+          description: The parsed and cleaned HTML of the site. For now includes parsed text only.
+        score:
+          type: number
+          description: The relevance score of the search result.
+      required:
+        - title
+        - url
+        - content
+        - score
diff --git a/packages/tavily_dart/pubspec.yaml b/packages/tavily_dart/pubspec.yaml
new file mode 100644
index 00000000..29519674
--- /dev/null
+++ b/packages/tavily_dart/pubspec.yaml
@@ -0,0 +1,34 @@
+name: tavily_dart
+description: Dart Client for the Tavily API (a search engine optimized for LLMs and RAG).
+version: 0.1.0
+repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/tavily_dart
+issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:tavily_dart
+homepage: https://github.com/davidmigloz/langchain_dart
+documentation: https://langchaindart.dev
+
+topics:
+  - ai
+  - llms
+  - search
+  - rag
+
+environment:
+  sdk: ">=3.4.0 <4.0.0"
+
+dependencies:
+  fetch_client: ^1.1.2
+  freezed_annotation: ^2.4.2
+  http: ^1.2.2
+  json_annotation: ^4.9.0
+  meta: ^1.11.0
+
+dev_dependencies:
+  build_runner: ^2.4.11
+  freezed: ^2.5.7
+  json_serializable: ^6.8.0
+  # openapi_spec: ^0.7.8
+  openapi_spec:
+    git:
+      url: https://github.com/davidmigloz/openapi_spec.git
+      ref: 93230a5e346b02789f0f727da8eecea9c7bdf118
+  test: ^1.25.8
diff --git a/packages/tavily_dart/test/tavily_test.dart b/packages/tavily_dart/test/tavily_test.dart
new file mode 100644
index 00000000..0df02cb8
--- /dev/null
+++ b/packages/tavily_dart/test/tavily_test.dart
@@ -0,0 +1,45 @@
+@TestOn('vm')
+library; // Uses dart:io
+
+import 'dart:io';
+
+import 'package:tavily_dart/tavily_dart.dart';
+import 'package:test/test.dart';
+
+void main() {
+  group('Tavily API tests', () {
+    late TavilyClient client;
+
+    setUp(() async {
+      client = TavilyClient();
+    });
+
+    tearDown(() {
+      client.endSession();
+    });
+
+    test('Test call search API', () async {
+      final res = await client.search(
+        request: SearchRequest(
+          apiKey: Platform.environment['TAVILY_API_KEY']!,
+          query: 'Should I invest in Apple right now?',
+          includeAnswer: true,
+          includeImages: true,
+          includeRawContent: true,
+          maxResults: 3,
+        ),
+      );
+      expect(res.answer, isNotEmpty);
+      expect(res.query, 'Should I invest in Apple right now?');
+      expect(res.responseTime, greaterThan(0));
+      expect(res.images, isNotEmpty);
+      expect(res.results, hasLength(3));
+      final result = res.results.first;
+      expect(result.title, isNotEmpty);
+      expect(result.url, isNotEmpty);
+      expect(result.content, isNotEmpty);
+      expect(result.rawContent, isNotEmpty);
+      expect(result.score, greaterThan(0));
+    });
+  });
+}
diff --git a/packages/vertex_ai/CHANGELOG.md b/packages/vertex_ai/CHANGELOG.md
index f081d3a9..372ba2dc 100644
--- a/packages/vertex_ai/CHANGELOG.md
+++ b/packages/vertex_ai/CHANGELOG.md
@@ -1,3 +1,15 @@
+📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details.
+
+---
+
+## 0.1.0+2
+
+ - Update a dependency to the latest release.
+
+## 0.1.0+1
+
+ - Update a dependency to the latest release.
+
 ## 0.1.0

 - **REFACTOR**: Minor changes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))
diff --git a/packages/vertex_ai/pubspec.yaml b/packages/vertex_ai/pubspec.yaml
index 1edc8121..9e25d858 100644
--- a/packages/vertex_ai/pubspec.yaml
+++ b/packages/vertex_ai/pubspec.yaml
@@ -1,27 +1,26 @@
 name: vertex_ai
-description: GCP Vertex AI ML platform API client (PaLM, Matching Engine, etc.).
-version: 0.1.0
+description: GCP Vertex AI ML platform API client (PaLM, Vector Search, etc.).
+version: 0.1.0+2
 repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/vertex_ai
 issue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:vertex_ai
 homepage: https://github.com/davidmigloz/langchain_dart
-documentation: https://langchaindart.com
+documentation: https://langchaindart.dev

 topics:
   - ai
   - nlp
   - llms
   - palm
-  - matching-engine

 environment:
-  sdk: ">=3.0.0 <4.0.0"
+  sdk: ">=3.4.0 <4.0.0"

 dependencies:
-  collection: '>=1.17.0 <1.19.0'
-  googleapis: ^12.0.0
-  googleapis_auth: ^1.5.1
-  http: ^1.1.0
+  collection: ^1.18.0
+  googleapis: ^13.0.0
+  googleapis_auth: ^1.6.0
+  http: ^1.2.2
   meta: ^1.11.0

 dev_dependencies:
-  test: ^1.25.2
+  test: ^1.25.8
diff --git a/pubspec.yaml b/pubspec.yaml
index 70fc02f6..8373da6a 100644
--- a/pubspec.yaml
+++ b/pubspec.yaml
@@ -4,4 +4,4 @@ environment:
   sdk: ">=3.0.0 <4.0.0"

 dev_dependencies:
-  melos: 6.0.0
+  melos: 6.1.0
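
For quick reference, a minimal usage sketch of the new `tavily_dart` client, based on the integration test above (same `TavilyClient`, `search`, and `SearchRequest` API) and assuming a `TAVILY_API_KEY` environment variable:

```dart
// Minimal sketch of calling the Tavily search API with the new package.
// Mirrors packages/tavily_dart/test/tavily_test.dart; the query string and
// the TAVILY_API_KEY environment variable are assumptions for illustration.
import 'dart:io';

import 'package:tavily_dart/tavily_dart.dart';

Future<void> main() async {
  final client = TavilyClient();
  try {
    final res = await client.search(
      request: SearchRequest(
        apiKey: Platform.environment['TAVILY_API_KEY']!,
        query: 'What is LangChain.dart?',
        includeAnswer: true,
        maxResults: 3,
      ),
    );
    print(res.answer);
    for (final result in res.results) {
      print('${result.title} (${result.url}) -> score ${result.score}');
    }
  } finally {
    // Release the underlying HTTP client, as the test's tearDown does.
    client.endSession();
  }
}
```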