diff --git a/.changeset/add-deepseek-adapter.md b/.changeset/add-deepseek-adapter.md
new file mode 100644
index 00000000..ee29a26a
--- /dev/null
+++ b/.changeset/add-deepseek-adapter.md
@@ -0,0 +1,5 @@
+---
+'@tanstack/ai-deepseek': minor
+---
+
+Add DeepSeek adapter support with `@tanstack/ai-deepseek` package. This adapter provides access to DeepSeek's powerful language models including DeepSeek-V3 (deepseek-chat), DeepSeek Reasoner (deepseek-reasoner), and DeepSeek Coder (deepseek-coder) with streaming, tool calling, and structured output capabilities.
\ No newline at end of file
diff --git a/packages/typescript/ai-deepseek/CHANGELOG.md b/packages/typescript/ai-deepseek/CHANGELOG.md
new file mode 100644
index 00000000..3d114ef4
--- /dev/null
+++ b/packages/typescript/ai-deepseek/CHANGELOG.md
@@ -0,0 +1,23 @@
+# @tanstack/ai-deepseek
+
+## 0.1.0
+
+### Minor Changes
+
+- Initial release of DeepSeek adapter for TanStack AI
+- Added support for DeepSeek chat models:
+ - `deepseek-chat` - Latest DeepSeek-V3 model with strong reasoning capabilities
+ - `deepseek-reasoner` - Specialized reasoning model with enhanced logical thinking
+ - `deepseek-coder` - Code-specialized model based on DeepSeek-Coder-V2
+- Implemented tree-shakeable adapters:
+ - Text adapter for chat/completion functionality
+ - Summarization adapter for text summarization
+ - Image adapter (placeholder - DeepSeek doesn't support image generation)
+- Features:
+ - Streaming chat responses
+ - Function/tool calling with automatic execution
+ - Structured output with Zod schema validation through system prompts
+ - OpenAI-compatible API integration
+ - Full TypeScript support with per-model type inference
+ - Environment variable configuration (`DEEPSEEK_API_KEY`)
+ - Custom base URL support for enterprise deployments
diff --git a/packages/typescript/ai-deepseek/README.md b/packages/typescript/ai-deepseek/README.md
new file mode 100644
index 00000000..ee276f34
--- /dev/null
+++ b/packages/typescript/ai-deepseek/README.md
@@ -0,0 +1,313 @@
+
+

+
+
+
+
+
+
+
+
+
+
+### [Become a Sponsor!](https://github.com/sponsors/tannerlinsley/)
+
+
+# TanStack AI - DeepSeek Adapter
+
+DeepSeek adapter for TanStack AI - provides access to DeepSeek's powerful language models including DeepSeek-V3, DeepSeek Reasoner, and DeepSeek Coder.
+
+- **Tree-shakeable adapters** - Import only what you need for smaller bundles
+- **Type-safe model options** - Fully typed DeepSeek model configurations
+- **Tool calling support** - Function calling with automatic tool execution
+- **Streaming responses** - Real-time response streaming
+- **Structured output** - Type-safe responses with Zod schemas
+
+### Read the docs →
+
+## Tree-Shakeable Adapters
+
+Import only the functionality you need for smaller bundle sizes:
+
+```typescript
+// Only chat functionality - no summarization code bundled
+import { deepseekText } from '@tanstack/ai-deepseek/adapters'
+import { chat } from '@tanstack/ai'
+
+// Set your DeepSeek API key
+process.env.DEEPSEEK_API_KEY = 'sk-...'
+
+// Create adapter
+const adapter = deepseekText('deepseek-chat')
+
+// Start chatting
+const stream = chat({
+ adapter,
+ messages: [{ role: 'user', content: 'Hello, DeepSeek!' }]
+})
+
+for await (const chunk of stream) {
+ if (chunk.type === 'content') {
+ console.log(chunk.delta)
+ }
+}
+```
+
+Available adapters: `deepseekText`, `deepseekSummarize`
+
+## Installation
+
+```bash
+npm install @tanstack/ai-deepseek @tanstack/ai zod
+```
+
+## Supported Models
+
+### Chat/Text Models
+
+- `deepseek-chat` - Latest DeepSeek-V3 model with strong reasoning capabilities
+- `deepseek-reasoner` - Specialized reasoning model with enhanced logical thinking
+- `deepseek-coder` - Code-specialized model based on DeepSeek-Coder-V2
+
+## Features
+
+### Tool Calling
+
+DeepSeek supports function calling with automatic tool execution:
+
+```typescript
+import { toolDefinition } from '@tanstack/ai'
+import { z } from 'zod'
+
+const weatherTool = toolDefinition({
+ name: 'getWeather',
+ inputSchema: z.object({
+ location: z.string().describe('City name'),
+ }),
+ outputSchema: z.object({
+ temperature: z.number(),
+ condition: z.string(),
+ }),
+})
+
+const serverTool = weatherTool.server(async ({ location }) => {
+ // Your weather API call here
+ return { temperature: 72, condition: 'sunny' }
+})
+
+const stream = chat({
+ adapter: deepseekText('deepseek-chat'),
+ messages: [{ role: 'user', content: 'What\'s the weather in San Francisco?' }],
+ tools: [serverTool],
+})
+```
+
+### Structured Output
+
+Generate type-safe structured responses using Zod schemas:
+
+```typescript
+import { generate } from '@tanstack/ai'
+import { z } from 'zod'
+
+const schema = z.object({
+ summary: z.string(),
+ keyPoints: z.array(z.string()),
+ sentiment: z.enum(['positive', 'negative', 'neutral']),
+})
+
+const result = await generate({
+ adapter: deepseekText('deepseek-chat'),
+ messages: [{
+ role: 'user',
+ content: 'Analyze this product review: "Great product, love it!"'
+ }],
+ schema,
+})
+
+// result.data is fully typed according to your schema
+console.log(result.data.summary)
+console.log(result.data.keyPoints)
+console.log(result.data.sentiment)
+```
+
+### Streaming Support
+
+All adapters support streaming for real-time responses:
+
+```typescript
+const stream = chat({
+ adapter: deepseekText('deepseek-chat'),
+ messages: [{ role: 'user', content: 'Write a story about AI' }],
+})
+
+for await (const chunk of stream) {
+ switch (chunk.type) {
+ case 'content':
+ process.stdout.write(chunk.delta)
+ break
+ case 'tool_call':
+ console.log('Tool called:', chunk.toolCall.function.name)
+ break
+ case 'done':
+ console.log('\nUsage:', chunk.usage)
+ break
+ }
+}
+```
+
+## Configuration
+
+### Environment Variables
+
+Set your DeepSeek API key:
+
+```bash
+export DEEPSEEK_API_KEY="sk-your-api-key-here"
+```
+
+### Custom Configuration
+
+```typescript
+import { createDeepSeekText } from '@tanstack/ai-deepseek'
+
+const adapter = createDeepSeekText('deepseek-chat', 'sk-...', {
+ baseURL: 'https://api.deepseek.com', // Custom API endpoint
+})
+```
+
+### Model Options
+
+Configure model-specific parameters:
+
+```typescript
+const stream = chat({
+ adapter: deepseekText('deepseek-chat'),
+ messages: [{ role: 'user', content: 'Hello!' }],
+ temperature: 0.8, // Creativity (0-2)
+ maxTokens: 2000, // Response length
+ topP: 0.9, // Nucleus sampling
+ frequencyPenalty: 0.1, // Reduce repetition
+ presencePenalty: 0.1, // Encourage novelty
+ stop: ['END'], // Stop sequences
+})
+```
+
+## Type Safety
+
+This adapter provides full TypeScript support with:
+
+- **Per-model type inference** - Options are typed based on the selected model
+- **Tool type safety** - Automatic inference of tool input/output types
+- **Schema validation** - Runtime validation with Zod schemas
+
+```typescript
+// Model-specific type inference
+const adapter = deepseekText('deepseek-reasoner') // Types inferred for reasoning model
+
+// Tool type safety
+const tool = toolDefinition({
+ name: 'calculate',
+ inputSchema: z.object({ expression: z.string() }),
+ outputSchema: z.object({ result: z.number() }),
+})
+
+// Fully typed tool implementation
+const serverTool = tool.server(async ({ expression }) => {
+ // expression is typed as string
+ return { result: eval(expression) } // result must be { result: number }
+})
+```
+
+## DeepSeek Model Capabilities
+
+| Model | Strengths | Best For |
+|-------|-----------|----------|
+| `deepseek-chat` | General conversation, reasoning | Chat applications, general Q&A |
+| `deepseek-reasoner` | Enhanced logical thinking | Complex problem solving, analysis |
+| `deepseek-coder` | Code understanding and generation | Programming assistance, code review |
+
+## Limitations
+
+- **Image Generation**: DeepSeek does not currently support image generation
+- **Multimodal Input**: Limited support for non-text inputs compared to other providers
+- **Rate Limits**: Refer to DeepSeek's API documentation for current rate limits
+
+## Get Involved
+
+- We welcome issues and pull requests!
+- Participate in [GitHub discussions](https://github.com/TanStack/ai/discussions)
+- Chat with the community on [Discord](https://discord.com/invite/WrRKjPJ)
+- See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions
+
+## Partners
+
+
+
+
+

+
+We're looking for TanStack AI Partners to join our mission! Partner with us to push the boundaries of TanStack AI and build amazing things together.
+
+
LET'S CHAT
+
+
+## Explore the TanStack Ecosystem
+
+- TanStack Config – Tooling for JS/TS packages
+- TanStack DB – Reactive sync client store
+- TanStack Devtools – Unified devtools panel
+- TanStack Form – Type‑safe form state
+- TanStack Pacer – Debouncing, throttling, batching
+- TanStack Query – Async state & caching
+- TanStack Ranger – Range & slider primitives
+- TanStack Router – Type‑safe routing, caching & URL state
+- TanStack Start – Full‑stack SSR & streaming
+- TanStack Store – Reactive data store
+- TanStack Table – Headless datagrids
+- TanStack Virtual – Virtualized rendering
+
+… and more at TanStack.com »
+
+
\ No newline at end of file
diff --git a/packages/typescript/ai-deepseek/package.json b/packages/typescript/ai-deepseek/package.json
new file mode 100644
index 00000000..78e31685
--- /dev/null
+++ b/packages/typescript/ai-deepseek/package.json
@@ -0,0 +1,53 @@
+{
+ "name": "@tanstack/ai-deepseek",
+ "version": "0.1.0",
+ "description": "DeepSeek adapter for TanStack AI",
+ "author": "",
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/TanStack/ai.git",
+ "directory": "packages/typescript/ai-deepseek"
+ },
+ "type": "module",
+ "module": "./dist/esm/index.js",
+ "types": "./dist/esm/index.d.ts",
+ "exports": {
+ ".": {
+ "types": "./dist/esm/index.d.ts",
+ "import": "./dist/esm/index.js"
+ }
+ },
+ "files": [
+ "dist",
+ "src"
+ ],
+ "scripts": {
+ "build": "vite build",
+ "clean": "premove ./build ./dist",
+ "lint:fix": "eslint ./src --fix",
+ "test:build": "publint --strict",
+ "test:eslint": "eslint ./src",
+ "test:lib": "vitest run",
+ "test:lib:dev": "pnpm test:lib --watch",
+ "test:types": "tsc"
+ },
+ "keywords": [
+ "ai",
+ "deepseek",
+ "tanstack",
+ "adapter"
+ ],
+ "dependencies": {
+ "openai": "^6.9.1"
+ },
+ "devDependencies": {
+ "@vitest/coverage-v8": "4.0.14",
+ "vite": "^7.2.7",
+ "zod": "^4.2.0"
+ },
+ "peerDependencies": {
+ "@tanstack/ai": "workspace:^",
+ "zod": "^4.2.0"
+ }
+}
diff --git a/packages/typescript/ai-deepseek/src/adapters/summarize.ts b/packages/typescript/ai-deepseek/src/adapters/summarize.ts
new file mode 100644
index 00000000..acc92b27
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/adapters/summarize.ts
@@ -0,0 +1,174 @@
+import { BaseSummarizeAdapter } from '@tanstack/ai/adapters'
+import { getDeepSeekApiKeyFromEnv } from '../utils'
+import { DeepSeekTextAdapter } from './text'
+import type { DEEPSEEK_CHAT_MODELS } from '../model-meta'
+import type {
+ StreamChunk,
+ SummarizationOptions,
+ SummarizationResult,
+} from '@tanstack/ai'
+import type { DeepSeekClientConfig } from '../utils'
+
+/**
+ * Configuration for DeepSeek summarize adapter
+ */
+export interface DeepSeekSummarizeConfig extends DeepSeekClientConfig {}
+
+/**
+ * DeepSeek-specific provider options for summarization
+ */
+export interface DeepSeekSummarizeProviderOptions {
+ /** Temperature for response generation (0-2) */
+ temperature?: number
+ /** Maximum tokens in the response */
+ maxTokens?: number
+}
+
+/** Model type for DeepSeek summarization */
+export type DeepSeekSummarizeModel = (typeof DEEPSEEK_CHAT_MODELS)[number]
+
+/**
+ * DeepSeek Summarize Adapter
+ *
+ * A thin wrapper around the text adapter that adds summarization-specific prompting.
+ * Delegates all API calls to the DeepSeekTextAdapter.
+ */
+export class DeepSeekSummarizeAdapter<
+ TModel extends DeepSeekSummarizeModel,
+> extends BaseSummarizeAdapter {
+ readonly kind = 'summarize' as const
+ readonly name = 'deepseek' as const
+
+  private textAdapter: DeepSeekTextAdapter<TModel>
+
+ constructor(config: DeepSeekSummarizeConfig, model: TModel) {
+ super({}, model)
+ this.textAdapter = new DeepSeekTextAdapter(config, model)
+ }
+
+  async summarize(options: SummarizationOptions): Promise<SummarizationResult> {
+ const systemPrompt = this.buildSummarizationPrompt(options)
+
+ // Use the text adapter's streaming and collect the result
+ let summary = ''
+ let id = ''
+ let model = options.model
+ let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 }
+
+ for await (const chunk of this.textAdapter.chatStream({
+ model: options.model,
+ messages: [{ role: 'user', content: options.text }],
+ systemPrompts: [systemPrompt],
+ maxTokens: options.maxLength,
+ temperature: 0.3,
+ })) {
+ if (chunk.type === 'content') {
+ summary = chunk.content
+ id = chunk.id
+ model = chunk.model
+ }
+ if (chunk.type === 'done' && chunk.usage) {
+ usage = chunk.usage
+ }
+ }
+
+ return { id, model, summary, usage }
+ }
+
+ async *summarizeStream(
+ options: SummarizationOptions,
+  ): AsyncIterable<StreamChunk> {
+ const systemPrompt = this.buildSummarizationPrompt(options)
+
+ // Delegate directly to the text adapter's streaming
+ yield* this.textAdapter.chatStream({
+ model: options.model,
+ messages: [{ role: 'user', content: options.text }],
+ systemPrompts: [systemPrompt],
+ maxTokens: options.maxLength,
+ temperature: 0.3,
+ })
+ }
+
+ private buildSummarizationPrompt(options: SummarizationOptions): string {
+ let prompt = 'You are a professional summarizer. '
+
+ switch (options.style) {
+ case 'bullet-points':
+ prompt += 'Provide a summary in bullet point format. '
+ break
+ case 'paragraph':
+ prompt += 'Provide a summary in paragraph format. '
+ break
+ case 'concise':
+ prompt += 'Provide a very concise summary in 1-2 sentences. '
+ break
+ default:
+ prompt += 'Provide a clear and concise summary. '
+ }
+
+ if (options.focus && options.focus.length > 0) {
+ prompt += `Focus on the following aspects: ${options.focus.join(', ')}. `
+ }
+
+ if (options.maxLength) {
+ prompt += `Keep the summary under ${options.maxLength} tokens. `
+ }
+
+ return prompt
+ }
+}
+
+/**
+ * Creates a DeepSeek summarize adapter with explicit API key.
+ * Type resolution happens here at the call site.
+ *
+ * @param model - The model name (e.g., 'deepseek-chat', 'deepseek-reasoner')
+ * @param apiKey - Your DeepSeek API key
+ * @param config - Optional additional configuration
+ * @returns Configured DeepSeek summarize adapter instance with resolved types
+ *
+ * @example
+ * ```typescript
+ * const adapter = createDeepSeekSummarize('deepseek-chat', "sk-...");
+ * ```
+ */
+export function createDeepSeekSummarize<TModel extends DeepSeekSummarizeModel>(
+  model: TModel,
+  apiKey: string,
+  config?: Omit<DeepSeekSummarizeConfig, 'apiKey'>,
+): DeepSeekSummarizeAdapter<TModel> {
+ return new DeepSeekSummarizeAdapter({ apiKey, ...config }, model)
+}
+
+/**
+ * Creates a DeepSeek summarize adapter with automatic API key detection from environment variables.
+ * Type resolution happens here at the call site.
+ *
+ * Looks for `DEEPSEEK_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'deepseek-chat', 'deepseek-reasoner')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured DeepSeek summarize adapter instance with resolved types
+ * @throws Error if DEEPSEEK_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses DEEPSEEK_API_KEY from environment
+ * const adapter = deepseekSummarize('deepseek-chat');
+ *
+ * await summarize({
+ * adapter,
+ * text: "Long article text..."
+ * });
+ * ```
+ */
+export function deepseekSummarize<TModel extends DeepSeekSummarizeModel>(
+  model: TModel,
+  config?: Omit<DeepSeekSummarizeConfig, 'apiKey'>,
+): DeepSeekSummarizeAdapter<TModel> {
+ const apiKey = getDeepSeekApiKeyFromEnv()
+ return createDeepSeekSummarize(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-deepseek/src/adapters/text.ts b/packages/typescript/ai-deepseek/src/adapters/text.ts
new file mode 100644
index 00000000..890ee9ed
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/adapters/text.ts
@@ -0,0 +1,675 @@
+import { BaseTextAdapter } from '@tanstack/ai/adapters'
+import { validateTextProviderOptions } from '../text/text-provider-options'
+import { convertToolsToProviderFormat } from '../tools'
+import {
+ createDeepSeekClient,
+ generateId,
+ getDeepSeekApiKeyFromEnv,
+} from '../utils'
+import type {
+ DEEPSEEK_CHAT_MODELS,
+ ResolveInputModalities,
+ ResolveProviderOptions,
+} from '../model-meta'
+import type {
+ StructuredOutputOptions,
+ StructuredOutputResult,
+} from '@tanstack/ai/adapters'
+import type OpenAI_SDK from 'openai'
+import type {
+ ContentPart,
+ ModelMessage,
+ StreamChunk,
+ TextOptions,
+} from '@tanstack/ai'
+import type { InternalTextProviderOptions } from '../text/text-provider-options'
+import type {
+ DeepSeekImageMetadata,
+ DeepSeekMessageMetadataByModality,
+} from '../message-types'
+import type { DeepSeekClientConfig } from '../utils'
+
+/**
+ * Configuration for DeepSeek text adapter
+ */
+export interface DeepSeekTextConfig extends DeepSeekClientConfig {}
+
+/**
+ * Alias for TextProviderOptions for external use
+ */
+export type { ExternalTextProviderOptions as DeepSeekTextProviderOptions } from '../text/text-provider-options'
+
+/**
+ * DeepSeek Text (Chat) Adapter
+ *
+ * Tree-shakeable adapter for DeepSeek chat/text completion functionality.
+ * Uses OpenAI-compatible Chat Completions API with DeepSeek-specific extensions.
+ *
+ * Features:
+ * - Standard chat/text completion
+ * - Thinking/Reasoning mode (chain-of-thought)
+ * - Tool calling
+ * - Structured output via JSON Schema
+ * - Multimodal support (text, images)
+ *
+ * Thinking Mode:
+ * - Automatically enabled for 'deepseek-reasoner' model
+ * - Can be manually enabled via modelOptions.thinking = true
+ * - Outputs reasoning content before final answer via 'thinking' stream chunks
+ * - Reasoning is preserved in multi-turn conversations via message metadata
+ * - Compatible with tool calling - reasoning context is maintained across tool invocations
+ *
+ * @example
+ * ```typescript
+ * // Basic thinking mode with deepseek-reasoner
+ * const adapter = deepseekText('deepseek-reasoner')
+ * const stream = chat({ adapter, messages: [...] })
+ *
+ * for await (const chunk of stream) {
+ * if (chunk.type === 'thinking') {
+ * console.log('Reasoning:', chunk.delta)
+ * } else if (chunk.type === 'content') {
+ * console.log('Response:', chunk.delta)
+ * }
+ * }
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Manual thinking mode with deepseek-chat
+ * const adapter = deepseekText('deepseek-chat')
+ * const stream = chat({
+ * adapter,
+ * messages: [...],
+ * modelOptions: { thinking: true }
+ * })
+ * ```
+ */
+export class DeepSeekTextAdapter<
+ TModel extends (typeof DEEPSEEK_CHAT_MODELS)[number],
+> extends BaseTextAdapter<
+ TModel,
+  ResolveProviderOptions<TModel>,
+  ResolveInputModalities<TModel>,
+ DeepSeekMessageMetadataByModality
+> {
+ readonly kind = 'text' as const
+ readonly name = 'deepseek' as const
+
+ private client: OpenAI_SDK
+
+ constructor(config: DeepSeekTextConfig, model: TModel) {
+ super({}, model)
+ this.client = createDeepSeekClient(config)
+ }
+
+ async *chatStream(
+    options: TextOptions<ResolveProviderOptions<TModel>>,
+  ): AsyncIterable<StreamChunk> {
+ const requestParams = this.mapTextOptionsToDeepSeek(options)
+
+ try {
+ const stream = await this.client.chat.completions.create({
+ ...requestParams,
+ stream: true,
+ })
+
+ yield* this.processDeepSeekStreamChunks(stream, options)
+ } catch (error: unknown) {
+ const err = error as Error & { code?: string; status?: number }
+ console.error(
+ '>>> DeepSeek chatStream: Fatal error during response creation <<<',
+ )
+ console.error('>>> Error message:', err.message)
+ console.error('>>> Error code:', err.code)
+ console.error('>>> Error status:', err.status)
+
+ // Yield error chunk before throwing
+ const timestamp = Date.now()
+ yield {
+ type: 'error',
+ id: generateId(this.name),
+ model: options.model,
+ timestamp,
+ error: {
+ message: err.message || 'Unknown error occurred during streaming',
+ code: err.code || err.status?.toString(),
+ },
+ }
+
+ throw error
+ }
+ }
+
+ /**
+ * Generate structured output using DeepSeek's JSON Schema response format.
+ * Uses stream: false to get the complete response in one call.
+ *
+ * DeepSeek supports structured output using their json_object mode:
+ * - Uses response_format: { type: 'json_object' } (not json_schema)
+ * - Schema is provided via prompt instructions (not API parameter)
+ * - Model is instructed to follow the schema format
+ * - Supports thinking mode - reasoning will be included in response if enabled
+ *
+ * Note: DeepSeek does NOT support OpenAI's json_schema format with strict validation.
+ * Instead, we embed the schema in the prompt and use json_object mode for valid JSON.
+ *
+ * @param options - Structured output options including chat options and output schema
+ * @returns Promise containing parsed data, raw text, and reasoning (if thinking mode enabled)
+ */
+ async structuredOutput(
+    options: StructuredOutputOptions<ResolveProviderOptions<TModel>>,
+  ): Promise<StructuredOutputResult<unknown>> {
+ const { chatOptions, outputSchema } = options
+ const requestParams = this.mapTextOptionsToDeepSeek(chatOptions)
+
+ // Convert schema to a readable format for the prompt
+ const schemaDescription = JSON.stringify(outputSchema, null, 2)
+
+ // Add schema instruction to the system message or create one
+ const messages = [...requestParams.messages]
+ const schemaInstruction = `You must respond with valid JSON that matches this exact schema:
+
+${schemaDescription}
+
+Respond only with the JSON object, no additional text.`
+
+ // Add schema instruction to system message or create one
+ const systemMessageIndex = messages.findIndex(
+ (msg) => msg.role === 'system',
+ )
+ if (systemMessageIndex >= 0) {
+ const systemMessage = messages[systemMessageIndex]
+ if (systemMessage) {
+ messages[systemMessageIndex] = {
+ role: 'system',
+ content: `${systemMessage.content || ''}\n\n${schemaInstruction}`,
+ }
+ }
+ } else {
+ messages.unshift({
+ role: 'system',
+ content: schemaInstruction,
+ })
+ }
+
+ // Check if thinking mode should be enabled
+ const modelOptions = chatOptions.modelOptions as any
+ const enableThinking = this.shouldEnableThinking(
+ chatOptions.model,
+ modelOptions,
+ )
+
+ try {
+ const baseRequestParams: any = {
+ ...requestParams,
+ messages,
+ stream: false,
+ stream_options: undefined,
+ response_format: {
+ type: 'json_object',
+ },
+ }
+
+ // Add thinking mode if enabled
+ if (enableThinking) {
+ baseRequestParams.extra_body = {
+ thinking: { type: 'enabled' },
+ }
+ }
+
+ const response =
+ await this.client.chat.completions.create(baseRequestParams)
+
+ // Extract text content from the response
+ const rawText = response.choices[0]?.message.content || ''
+
+ // Extract reasoning content if present
+ const reasoningContent = this.extractReasoningContent(response)
+
+ // Parse the JSON response
+ let parsed: unknown
+ try {
+ parsed = JSON.parse(rawText)
+ } catch {
+ throw new Error(
+ `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`,
+ )
+ }
+
+ // Return the parsed JSON with reasoning if available
+ return {
+ data: parsed,
+ rawText,
+ ...(reasoningContent && { reasoning: reasoningContent }),
+ }
+ } catch (error: unknown) {
+ const err = error as Error
+ console.error('>>> structuredOutput: Error during response creation <<<')
+ console.error('>>> Error message:', err.message)
+ throw error
+ }
+ }
+
+  private async *processDeepSeekStreamChunks(
+    stream: AsyncIterable<OpenAI_SDK.Chat.Completions.ChatCompletionChunk>,
+    options: TextOptions,
+  ): AsyncIterable<StreamChunk> {
+ let accumulatedContent = ''
+ let accumulatedReasoning = ''
+ const timestamp = Date.now()
+ let responseId = generateId(this.name)
+
+ // Track tool calls being streamed (arguments come in chunks)
+ const toolCallsInProgress = new Map<
+ number,
+ {
+ id: string
+ name: string
+ arguments: string
+ }
+ >()
+
+ try {
+ for await (const chunk of stream) {
+ responseId = chunk.id || responseId
+ const choice = chunk.choices[0]
+
+ if (!choice) continue
+
+ const delta = choice.delta
+ const deltaContent = delta.content
+ const deltaToolCalls = delta.tool_calls
+ const deltaReasoning = (delta as any).reasoning_content
+
+ // Handle reasoning delta (thinking mode)
+ // DeepSeek provides reasoning content via delta.reasoning_content in streaming mode
+ if (deltaReasoning && typeof deltaReasoning === 'string') {
+ accumulatedReasoning += deltaReasoning
+ yield {
+ type: 'thinking',
+ id: responseId,
+ model: chunk.model || options.model,
+ timestamp,
+ delta: deltaReasoning,
+ content: accumulatedReasoning,
+ }
+ }
+
+ // Handle content delta
+ if (deltaContent) {
+ accumulatedContent += deltaContent
+ yield {
+ type: 'content',
+ id: responseId,
+ model: chunk.model || options.model,
+ timestamp,
+ delta: deltaContent,
+ content: accumulatedContent,
+ role: 'assistant',
+ }
+ }
+
+ // Handle tool calls - they come in as deltas
+ if (deltaToolCalls) {
+ for (const toolCallDelta of deltaToolCalls) {
+ const index = toolCallDelta.index
+
+ // Initialize or update the tool call in progress
+ if (!toolCallsInProgress.has(index)) {
+ toolCallsInProgress.set(index, {
+ id: toolCallDelta.id || '',
+ name: toolCallDelta.function?.name || '',
+ arguments: '',
+ })
+ }
+
+ const toolCall = toolCallsInProgress.get(index)!
+
+ // Update with any new data from the delta
+ if (toolCallDelta.id) {
+ toolCall.id = toolCallDelta.id
+ }
+ if (toolCallDelta.function?.name) {
+ toolCall.name = toolCallDelta.function.name
+ }
+ if (toolCallDelta.function?.arguments) {
+ toolCall.arguments += toolCallDelta.function.arguments
+ }
+ }
+ }
+
+ // Handle finish reason
+ if (choice.finish_reason) {
+ // Emit all completed tool calls
+ if (
+ choice.finish_reason === 'tool_calls' ||
+ toolCallsInProgress.size > 0
+ ) {
+ for (const [index, toolCall] of toolCallsInProgress) {
+ yield {
+ type: 'tool_call',
+ id: responseId,
+ model: chunk.model || options.model,
+ timestamp,
+ index,
+ toolCall: {
+ id: toolCall.id,
+ type: 'function',
+ function: {
+ name: toolCall.name,
+ arguments: toolCall.arguments,
+ },
+ },
+ }
+ }
+ }
+
+ yield {
+ type: 'done',
+ id: responseId,
+ model: chunk.model || options.model,
+ timestamp,
+ usage: chunk.usage
+ ? {
+ promptTokens: chunk.usage.prompt_tokens || 0,
+ completionTokens: chunk.usage.completion_tokens || 0,
+ totalTokens: chunk.usage.total_tokens || 0,
+ }
+ : undefined,
+ finishReason:
+ choice.finish_reason === 'tool_calls' ||
+ toolCallsInProgress.size > 0
+ ? 'tool_calls'
+ : 'stop',
+ }
+ }
+ }
+ } catch (error: unknown) {
+ const err = error as Error & { code?: string }
+ console.log('[DeepSeek Adapter] Stream ended with error:', err.message)
+ yield {
+ type: 'error',
+ id: responseId,
+ model: options.model,
+ timestamp,
+ error: {
+ message: err.message || 'Unknown error occurred',
+ code: err.code,
+ },
+ }
+ }
+ }
+
+ /**
+ * Maps common options to DeepSeek-specific Chat Completions format.
+ * Handles thinking mode configuration and OpenAI-compatible parameters.
+ *
+ * @param options - Text options including model, messages, and configuration
+ * @returns DeepSeek-compatible request parameters with thinking mode if enabled
+ */
+ private mapTextOptionsToDeepSeek(
+ options: TextOptions,
+ ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming {
+ const modelOptions = options.modelOptions as
+ | Omit<
+ InternalTextProviderOptions,
+ 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p'
+ >
+ | undefined
+
+ if (modelOptions) {
+ validateTextProviderOptions({
+ ...modelOptions,
+ model: options.model,
+ })
+ }
+
+ const tools = options.tools
+ ? convertToolsToProviderFormat(options.tools)
+ : undefined
+
+ // Build messages array with system prompts
+    const messages: Array<OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam> =
+ []
+
+ // Add system prompts first
+ if (options.systemPrompts && options.systemPrompts.length > 0) {
+ messages.push({
+ role: 'system',
+ content: options.systemPrompts.join('\n'),
+ })
+ }
+
+ // Convert messages
+ for (const message of options.messages) {
+ messages.push(this.convertMessageToDeepSeek(message))
+ }
+
+ // Enable thinking mode for deepseek-reasoner model or when explicitly enabled
+ const enableThinking = this.shouldEnableThinking(
+ options.model,
+ modelOptions,
+ )
+
+ const baseParams: any = {
+ model: options.model,
+ messages,
+ temperature: options.temperature,
+ max_tokens: options.maxTokens,
+ top_p: options.topP,
+      tools: tools as Array<OpenAI_SDK.Chat.Completions.ChatCompletionTool>,
+ stream: true,
+ stream_options: { include_usage: true },
+ }
+
+ // Add thinking mode if enabled
+ if (enableThinking) {
+ baseParams.extra_body = {
+ thinking: { type: 'enabled' },
+ }
+ }
+
+ return baseParams
+ }
+
+ private convertMessageToDeepSeek(
+ message: ModelMessage,
+ ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam {
+ // Handle tool messages
+ if (message.role === 'tool') {
+ return {
+ role: 'tool',
+ tool_call_id: message.toolCallId || '',
+ content:
+ typeof message.content === 'string'
+ ? message.content
+ : JSON.stringify(message.content),
+ }
+ }
+
+ // Handle assistant messages
+ if (message.role === 'assistant') {
+ const toolCalls = message.toolCalls?.map((tc) => ({
+ id: tc.id,
+ type: 'function' as const,
+ function: {
+ name: tc.function.name,
+ arguments:
+ typeof tc.function.arguments === 'string'
+ ? tc.function.arguments
+ : JSON.stringify(tc.function.arguments),
+ },
+ }))
+
+ const baseMessage: any = {
+ role: 'assistant',
+ content: this.extractTextContent(message.content),
+ ...(toolCalls && toolCalls.length > 0 ? { tool_calls: toolCalls } : {}),
+ }
+
+ // Note: DeepSeek reasoning content is handled through streaming chunks,
+ // not through message metadata. Multi-turn conversations should not
+ // include reasoning_content in assistant messages per DeepSeek API guidelines.
+
+ return baseMessage
+ }
+
+ // Handle user messages - support multimodal content
+ const contentParts = this.normalizeContent(message.content)
+
+ // If only text, use simple string format
+ if (contentParts.length === 1 && contentParts[0]?.type === 'text') {
+ return {
+ role: 'user',
+ content: contentParts[0].content,
+ }
+ }
+
+ // Otherwise, use array format for multimodal
+    const parts: Array<OpenAI_SDK.Chat.Completions.ChatCompletionContentPart> =
+ []
+ for (const part of contentParts) {
+ if (part.type === 'text') {
+ parts.push({ type: 'text', text: part.content })
+ } else if (part.type === 'image') {
+ const imageMetadata = part.metadata as DeepSeekImageMetadata | undefined
+ parts.push({
+ type: 'image_url',
+ image_url: {
+ url: part.source.value,
+ detail: imageMetadata?.detail || 'auto',
+ },
+ })
+ }
+ }
+
+ return {
+ role: 'user',
+ content: parts.length > 0 ? parts : '',
+ }
+ }
+
+ /**
+ * Normalizes message content to an array of ContentPart.
+ * Handles backward compatibility with string content.
+ */
+ private normalizeContent(
+    content: string | null | Array<ContentPart>,
+  ): Array<ContentPart> {
+ if (content === null) {
+ return []
+ }
+ if (typeof content === 'string') {
+ return [{ type: 'text', content: content }]
+ }
+ return content
+ }
+
+ /**
+ * Extracts text content from a content value that may be string, null, or ContentPart array.
+ */
+ private extractTextContent(
+    content: string | null | Array<ContentPart>,
+ ): string {
+ if (content === null) {
+ return ''
+ }
+ if (typeof content === 'string') {
+ return content
+ }
+ // It's an array of ContentPart
+ return content
+ .filter((p) => p.type === 'text')
+ .map((p) => p.content)
+ .join('')
+ }
+
+ /**
+ * Helper function to extract reasoning content from DeepSeek response.
+ * Used for preserving reasoning context in multi-turn conversations.
+ *
+ * @param response - DeepSeek chat completion response
+ * @returns Reasoning content string if present, undefined otherwise
+ */
+ private extractReasoningContent(
+ response: OpenAI_SDK.Chat.Completions.ChatCompletion,
+ ): string | undefined {
+ const message = response.choices[0]?.message as any
+ return message?.reasoning_content
+ }
+
+ /**
+ * Helper function to determine if thinking mode should be enabled.
+ * Automatically enables for deepseek-reasoner model or when explicitly requested.
+ *
+ * @param model - Model name to check
+ * @param modelOptions - Model options that may contain thinking flag
+ * @returns True if thinking mode should be enabled
+ */
+ private shouldEnableThinking(model: string, modelOptions?: any): boolean {
+ return (
+ model === 'deepseek-reasoner' ||
+ (modelOptions && 'thinking' in modelOptions && modelOptions.thinking)
+ )
+ }
+}
+
+/**
+ * Creates a DeepSeek text adapter with explicit API key.
+ * Type resolution happens here at the call site.
+ *
+ * @param model - The model name (e.g., 'deepseek-chat', 'deepseek-reasoner')
+ * @param apiKey - Your DeepSeek API key
+ * @param config - Optional additional configuration
+ * @returns Configured DeepSeek text adapter instance with resolved types
+ *
+ * @example
+ * ```typescript
+ * const adapter = createDeepSeekText('deepseek-chat', "sk-...");
+ * // adapter has type-safe providerOptions for deepseek-chat
+ * ```
+ */
+export function createDeepSeekText<
+ TModel extends (typeof DEEPSEEK_CHAT_MODELS)[number],
+>(
+ model: TModel,
+ apiKey: string,
+  config?: Omit<DeepSeekTextConfig, 'apiKey'>,
+): DeepSeekTextAdapter<TModel> {
+ return new DeepSeekTextAdapter({ apiKey, ...config }, model)
+}
+
+/**
+ * Creates a DeepSeek text adapter with automatic API key detection from environment variables.
+ * Type resolution happens here at the call site.
+ *
+ * Looks for `DEEPSEEK_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'deepseek-chat', 'deepseek-reasoner')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured DeepSeek text adapter instance with resolved types
+ * @throws Error if DEEPSEEK_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses DEEPSEEK_API_KEY from environment
+ * const adapter = deepseekText('deepseek-chat');
+ *
+ * const stream = chat({
+ * adapter,
+ * messages: [{ role: "user", content: "Hello!" }]
+ * });
+ * ```
+ */
+export function deepseekText<
+ TModel extends (typeof DEEPSEEK_CHAT_MODELS)[number],
+>(
+ model: TModel,
+  config?: Omit<DeepSeekTextConfig, 'apiKey'>,
+): DeepSeekTextAdapter<TModel> {
+ const apiKey = getDeepSeekApiKeyFromEnv()
+ return createDeepSeekText(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-deepseek/src/index.ts b/packages/typescript/ai-deepseek/src/index.ts
new file mode 100644
index 00000000..dd32bcc9
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/index.ts
@@ -0,0 +1,42 @@
+// ============================================================================
+// New Tree-Shakeable Adapters (Recommended)
+// ============================================================================
+
+// Text (Chat) adapter - for chat/text completion
+export {
+ DeepSeekTextAdapter,
+ createDeepSeekText,
+ deepseekText,
+ type DeepSeekTextConfig,
+ type DeepSeekTextProviderOptions,
+} from './adapters/text'
+
+// Summarize adapter - for text summarization
+export {
+ DeepSeekSummarizeAdapter,
+ createDeepSeekSummarize,
+ deepseekSummarize,
+ type DeepSeekSummarizeConfig,
+ type DeepSeekSummarizeProviderOptions,
+ type DeepSeekSummarizeModel,
+} from './adapters/summarize'
+
+// ============================================================================
+// Type Exports
+// ============================================================================
+
+export type {
+ DeepSeekChatModelProviderOptionsByName,
+ DeepSeekModelInputModalitiesByName,
+ ResolveProviderOptions,
+ ResolveInputModalities,
+} from './model-meta'
+export { DEEPSEEK_CHAT_MODELS } from './model-meta'
+export type {
+ DeepSeekTextMetadata,
+ DeepSeekImageMetadata,
+ DeepSeekAudioMetadata,
+ DeepSeekVideoMetadata,
+ DeepSeekDocumentMetadata,
+ DeepSeekMessageMetadataByModality,
+} from './message-types'
diff --git a/packages/typescript/ai-deepseek/src/message-types.ts b/packages/typescript/ai-deepseek/src/message-types.ts
new file mode 100644
index 00000000..312d25ee
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/message-types.ts
@@ -0,0 +1,70 @@
+/**
+ * DeepSeek-specific metadata types for multimodal content parts.
+ * These types extend the base ContentPart metadata with DeepSeek-specific options.
+ *
+ * DeepSeek uses an OpenAI-compatible API, so metadata types are similar to OpenAI.
+ * Note: DeepSeek currently has limited multimodal support compared to other providers.
+ *
+ * @see https://platform.deepseek.com/api-docs
+ */
+
+/**
+ * Metadata for DeepSeek image content parts.
+ * Controls how the model processes and analyzes images.
+ * Note: DeepSeek supports viewing/processing images but not generating them.
+ */
+export interface DeepSeekImageMetadata {
+ /**
+ * Controls how the model processes the image.
+ * - 'auto': Let the model decide based on image size and content
+ * - 'low': Use low resolution processing (faster, cheaper, less detail)
+ * - 'high': Use high resolution processing (slower, more expensive, more detail)
+ *
+ * @default 'auto'
+ */
+ detail?: 'auto' | 'low' | 'high'
+}
+
+/**
+ * Metadata for DeepSeek audio content parts.
+ * Specifies the audio format for proper processing.
+ * Note: Audio support is currently limited in DeepSeek models.
+ */
+export interface DeepSeekAudioMetadata {
+ /**
+ * The format of the audio.
+ * Supported formats: mp3, wav, flac, etc.
+ * @default 'mp3'
+ */
+ format?: 'mp3' | 'wav' | 'flac' | 'ogg' | 'webm' | 'aac'
+}
+
+/**
+ * Metadata for DeepSeek video content parts.
+ * Note: Video support is currently not available in DeepSeek models.
+ */
+export interface DeepSeekVideoMetadata {}
+
+/**
+ * Metadata for DeepSeek document content parts.
+ * Note: Direct document support may vary; PDFs often need to be converted to images.
+ */
+export interface DeepSeekDocumentMetadata {}
+
+/**
+ * Metadata for DeepSeek text content parts.
+ * Currently no specific metadata options for text in DeepSeek.
+ */
+export interface DeepSeekTextMetadata {}
+
+/**
+ * Map of modality types to their DeepSeek-specific metadata types.
+ * Used for type inference when constructing multimodal messages.
+ */
+export interface DeepSeekMessageMetadataByModality {
+ text: DeepSeekTextMetadata
+ image: DeepSeekImageMetadata
+ audio: DeepSeekAudioMetadata
+ video: DeepSeekVideoMetadata
+ document: DeepSeekDocumentMetadata
+}
diff --git a/packages/typescript/ai-deepseek/src/model-meta.ts b/packages/typescript/ai-deepseek/src/model-meta.ts
new file mode 100644
index 00000000..30594e60
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/model-meta.ts
@@ -0,0 +1,152 @@
+/**
+ * Model metadata interface for documentation and type inference
+ */
+interface ModelMeta {
+ name: string
+ supports: {
+ input: Array<'text' | 'image' | 'audio' | 'video' | 'document'>
+ output: Array<'text' | 'image' | 'audio' | 'video'>
+ capabilities?: Array<'reasoning' | 'tool_calling' | 'structured_outputs'>
+ }
+ max_input_tokens?: number
+ max_output_tokens?: number
+ context_window?: number
+ knowledge_cutoff?: string
+ pricing?: {
+ input: {
+ normal: number
+ cached?: number
+ }
+ output: {
+ normal: number
+ }
+ }
+}
+
+const DEEPSEEK_V3 = {
+ name: 'deepseek-chat',
+ context_window: 65_536,
+ supports: {
+ input: ['text', 'image'],
+ output: ['text'],
+ capabilities: ['reasoning', 'tool_calling'],
+ },
+ pricing: {
+ input: {
+ normal: 0.14,
+ cached: 0.014,
+ },
+ output: {
+ normal: 0.28,
+ },
+ },
+} as const satisfies ModelMeta
+
+const DEEPSEEK_REASONER = {
+ name: 'deepseek-reasoner',
+ context_window: 65_536,
+ supports: {
+ input: ['text', 'image'],
+ output: ['text'],
+ capabilities: ['reasoning'],
+ },
+ pricing: {
+ input: {
+ normal: 0.55,
+ cached: 0.055,
+ },
+ output: {
+ normal: 2.19,
+ },
+ },
+} as const satisfies ModelMeta
+
+const DEEPSEEK_CODER_V2_INSTRUCT = {
+ name: 'deepseek-coder',
+ context_window: 163_840,
+ supports: {
+ input: ['text'],
+ output: ['text'],
+ capabilities: ['structured_outputs', 'tool_calling'],
+ },
+ pricing: {
+ input: {
+ normal: 0.14,
+ cached: 0.014,
+ },
+ output: {
+ normal: 0.28,
+ },
+ },
+} as const satisfies ModelMeta
+
+/**
+ * DeepSeek Chat Models
+ * Based on DeepSeek's available models as of 2025
+ */
+export const DEEPSEEK_CHAT_MODELS = [
+ DEEPSEEK_V3.name,
+ DEEPSEEK_REASONER.name,
+ DEEPSEEK_CODER_V2_INSTRUCT.name,
+] as const
+
+/**
+ * Type-only map from DeepSeek chat model name to its supported input modalities.
+ * Used for type inference when constructing multimodal messages.
+ */
+export type DeepSeekModelInputModalitiesByName = {
+ [DEEPSEEK_V3.name]: typeof DEEPSEEK_V3.supports.input
+ [DEEPSEEK_REASONER.name]: typeof DEEPSEEK_REASONER.supports.input
+ [DEEPSEEK_CODER_V2_INSTRUCT.name]: typeof DEEPSEEK_CODER_V2_INSTRUCT.supports.input
+}
+
+/**
+ * Type-only map from DeepSeek chat model name to its provider options type.
+ * Since DeepSeek uses OpenAI-compatible API, we reuse OpenAI provider options.
+ */
+export type DeepSeekChatModelProviderOptionsByName = {
+ [K in (typeof DEEPSEEK_CHAT_MODELS)[number]]: DeepSeekProviderOptions
+}
+
+/**
+ * DeepSeek-specific provider options
+ * Based on OpenAI-compatible API options
+ */
+export interface DeepSeekProviderOptions {
+ /** Temperature for response generation (0-2) */
+ temperature?: number
+ /** Maximum tokens in the response */
+ max_tokens?: number
+ /** Top-p sampling parameter */
+ top_p?: number
+ /** Frequency penalty (-2.0 to 2.0) */
+ frequency_penalty?: number
+ /** Presence penalty (-2.0 to 2.0) */
+ presence_penalty?: number
+ /** Stop sequences */
+  stop?: string | Array<string>
+ /** A unique identifier representing your end-user */
+ user?: string
+}
+
+// ===========================
+// Type Resolution Helpers
+// ===========================
+
+/**
+ * Resolve provider options for a specific model.
+ * If the model has explicit options in the map, use those; otherwise use base options.
+ */
+export type ResolveProviderOptions<TModel> =
+ TModel extends keyof DeepSeekChatModelProviderOptionsByName
+ ? DeepSeekChatModelProviderOptionsByName[TModel]
+ : DeepSeekProviderOptions
+
+/**
+ * Resolve input modalities for a specific model.
+ * If the model has explicit modalities in the map, use those; otherwise use text only.
+ */
+export type ResolveInputModalities<TModel> =
+ TModel extends keyof DeepSeekModelInputModalitiesByName
+ ? DeepSeekModelInputModalitiesByName[TModel]
+ : readonly ['text']
diff --git a/packages/typescript/ai-deepseek/src/text/text-provider-options.ts b/packages/typescript/ai-deepseek/src/text/text-provider-options.ts
new file mode 100644
index 00000000..3ace764e
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/text/text-provider-options.ts
@@ -0,0 +1,83 @@
+import type { FunctionTool } from '../tools/function-tool'
+
+/**
+ * DeepSeek Text Provider Options
+ *
+ * DeepSeek uses an OpenAI-compatible Chat Completions API.
+ * However, not all OpenAI features may be supported by DeepSeek.
+ */
+
+/**
+ * Base provider options for DeepSeek text/chat models
+ */
+export interface DeepSeekBaseOptions {
+ /**
+ * A unique identifier representing your end-user.
+ * Can help DeepSeek to monitor and detect abuse.
+ */
+ user?: string
+}
+
+/**
+ * DeepSeek-specific provider options for text/chat
+ * Based on OpenAI-compatible API options
+ */
+export interface DeepSeekTextProviderOptions extends DeepSeekBaseOptions {
+ /**
+ * Temperature for response generation (0-2)
+ * Higher values make output more random, lower values more focused
+ */
+ temperature?: number
+ /**
+ * Top-p sampling parameter (0-1)
+ * Alternative to temperature, nucleus sampling
+ */
+ top_p?: number
+ /**
+ * Maximum tokens in the response
+ */
+ max_tokens?: number
+ /**
+ * Frequency penalty (-2.0 to 2.0)
+ */
+ frequency_penalty?: number
+ /**
+ * Presence penalty (-2.0 to 2.0)
+ */
+ presence_penalty?: number
+ /**
+ * Stop sequences
+ */
+  stop?: string | Array<string>
+ /**
+ * Enable thinking mode for chain-of-thought reasoning
+ * When enabled, the model will output reasoning content before the final answer
+ * Automatically enabled for deepseek-reasoner model
+ */
+ thinking?: boolean
+}
+
+/**
+ * Internal options interface for validation
+ * Used internally by the adapter
+ */
+export interface InternalTextProviderOptions extends DeepSeekTextProviderOptions {
+ model: string
+ stream?: boolean
+  tools?: Array<FunctionTool>
+}
+
+/**
+ * External provider options (what users pass in)
+ */
+export type ExternalTextProviderOptions = DeepSeekTextProviderOptions
+
+/**
+ * Validates text provider options
+ */
+export function validateTextProviderOptions(
+ _options: InternalTextProviderOptions,
+): void {
+ // Basic validation can be added here if needed
+ // For now, DeepSeek API will handle validation
+}
diff --git a/packages/typescript/ai-deepseek/src/tools/function-tool.ts b/packages/typescript/ai-deepseek/src/tools/function-tool.ts
new file mode 100644
index 00000000..8a473bc8
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/tools/function-tool.ts
@@ -0,0 +1,45 @@
+import { makeDeepSeekStructuredOutputCompatible } from '../utils/schema-converter'
+import type { JSONSchema, Tool } from '@tanstack/ai'
+import type OpenAI from 'openai'
+
+// Use Chat Completions API tool format (not Responses API)
+export type FunctionTool = OpenAI.Chat.Completions.ChatCompletionTool
+
+/**
+ * Converts a standard Tool to DeepSeek ChatCompletionTool format.
+ *
+ * Tool schemas are already converted to JSON Schema in the ai layer.
+ * We apply DeepSeek-specific transformations for strict mode:
+ * - All properties in required array
+ * - Optional fields made nullable
+ * - additionalProperties: false
+ *
+ * This enables strict mode for all tools automatically.
+ */
+export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool {
+ // Tool schemas are already converted to JSON Schema in the ai layer
+ // Apply DeepSeek-specific transformations for strict mode
+ const inputSchema = (tool.inputSchema ?? {
+ type: 'object',
+ properties: {},
+ required: [],
+ }) as JSONSchema
+
+ const jsonSchema = makeDeepSeekStructuredOutputCompatible(
+ inputSchema,
+ inputSchema.required || [],
+ )
+
+ // Ensure additionalProperties is false for strict mode
+ jsonSchema.additionalProperties = false
+
+ return {
+ type: 'function',
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: jsonSchema,
+ strict: true, // Always use strict mode since our schema converter handles the requirements
+ },
+ } satisfies FunctionTool
+}
diff --git a/packages/typescript/ai-deepseek/src/tools/index.ts b/packages/typescript/ai-deepseek/src/tools/index.ts
new file mode 100644
index 00000000..cde61d66
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/tools/index.ts
@@ -0,0 +1,2 @@
+export { convertFunctionToolToAdapterFormat, type FunctionTool } from './function-tool'
+export { convertToolsToProviderFormat } from './tool-converter'
diff --git a/packages/typescript/ai-deepseek/src/tools/tool-converter.ts b/packages/typescript/ai-deepseek/src/tools/tool-converter.ts
new file mode 100644
index 00000000..c7471124
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/tools/tool-converter.ts
@@ -0,0 +1,17 @@
+import { convertFunctionToolToAdapterFormat } from './function-tool'
+import type { FunctionTool } from './function-tool'
+import type { Tool } from '@tanstack/ai'
+
+/**
+ * Converts an array of standard Tools to DeepSeek-specific format
+ * DeepSeek uses OpenAI-compatible API, so we primarily support function tools
+ */
+export function convertToolsToProviderFormat(
+  tools: Array<Tool>,
+): Array<FunctionTool> {
+ return tools.map((tool) => {
+ // For DeepSeek, all tools are converted as function tools
+ // DeepSeek uses OpenAI-compatible API which primarily supports function tools
+ return convertFunctionToolToAdapterFormat(tool)
+ })
+}
diff --git a/packages/typescript/ai-deepseek/src/utils/client.ts b/packages/typescript/ai-deepseek/src/utils/client.ts
new file mode 100644
index 00000000..0bf7e843
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/utils/client.ts
@@ -0,0 +1,45 @@
+import OpenAI_SDK from 'openai'
+
+export interface DeepSeekClientConfig {
+ apiKey: string
+ baseURL?: string
+}
+
+/**
+ * Creates a DeepSeek SDK client instance using OpenAI SDK with DeepSeek's base URL
+ */
+export function createDeepSeekClient(config: DeepSeekClientConfig): OpenAI_SDK {
+ return new OpenAI_SDK({
+ apiKey: config.apiKey,
+ baseURL: config.baseURL || 'https://api.deepseek.com',
+ })
+}
+
+/**
+ * Gets DeepSeek API key from environment variables
+ * @throws Error if DEEPSEEK_API_KEY is not found
+ */
+export function getDeepSeekApiKeyFromEnv(): string {
+ const env =
+ typeof globalThis !== 'undefined' && (globalThis as any).window?.env
+ ? (globalThis as any).window.env
+ : typeof process !== 'undefined'
+ ? process.env
+ : undefined
+ const key = env?.DEEPSEEK_API_KEY
+
+ if (!key) {
+ throw new Error(
+ 'DEEPSEEK_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.',
+ )
+ }
+
+ return key
+}
+
+/**
+ * Generates a unique ID with a prefix
+ */
+export function generateId(prefix: string): string {
+ return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}`
+}
diff --git a/packages/typescript/ai-deepseek/src/utils/index.ts b/packages/typescript/ai-deepseek/src/utils/index.ts
new file mode 100644
index 00000000..c5b8d951
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/utils/index.ts
@@ -0,0 +1,10 @@
+export {
+ createDeepSeekClient,
+ getDeepSeekApiKeyFromEnv,
+ generateId,
+ type DeepSeekClientConfig,
+} from './client'
+export {
+ transformNullsToUndefined,
+ makeDeepSeekStructuredOutputCompatible,
+} from './schema-converter'
diff --git a/packages/typescript/ai-deepseek/src/utils/schema-converter.ts b/packages/typescript/ai-deepseek/src/utils/schema-converter.ts
new file mode 100644
index 00000000..3bce521d
--- /dev/null
+++ b/packages/typescript/ai-deepseek/src/utils/schema-converter.ts
@@ -0,0 +1,134 @@
+/**
+ * Recursively transform null values to undefined in an object.
+ *
+ * This is needed because DeepSeek's structured output (via OpenAI-compatible API) requires all fields to be
+ * in the `required` array, with optional fields made nullable (type: ["string", "null"]).
+ * When DeepSeek returns null for optional fields, we need to convert them back to
+ * undefined to match the original Zod schema expectations.
+ *
+ * @param obj - Object to transform
+ * @returns Object with nulls converted to undefined
+ */
+export function transformNullsToUndefined<T>(obj: T): T {
+ if (obj === null) {
+ return undefined as unknown as T
+ }
+
+ if (Array.isArray(obj)) {
+ return obj.map((item) => transformNullsToUndefined(item)) as unknown as T
+ }
+
+ if (typeof obj === 'object') {
+    const result: Record<string, unknown> = {}
+    for (const [key, value] of Object.entries(obj as Record<string, unknown>)) {
+ const transformed = transformNullsToUndefined(value)
+ // Only include the key if the value is not undefined
+ // This makes { notes: null } become {} (field absent) instead of { notes: undefined }
+ if (transformed !== undefined) {
+ result[key] = transformed
+ }
+ }
+ return result as T
+ }
+
+ return obj
+}
+
+/**
+ * Transform a JSON schema to be compatible with DeepSeek's structured output requirements (OpenAI-compatible).
+ * DeepSeek requires:
+ * - All properties must be in the `required` array
+ * - Optional fields should have null added to their type union
+ * - additionalProperties must be false for objects
+ *
+ * @param schema - JSON schema to transform
+ * @param originalRequired - Original required array (to know which fields were optional)
+ * @returns Transformed schema compatible with DeepSeek structured output
+ */
+export function makeDeepSeekStructuredOutputCompatible(
+  schema: Record<string, any>,
+  originalRequired: Array<string> = [],
+): Record<string, any> {
+ const result = { ...schema }
+
+ // Handle object types
+ if (result.type === 'object' && result.properties) {
+ const properties = { ...result.properties }
+ const allPropertyNames = Object.keys(properties)
+
+ // Transform each property
+ for (const propName of allPropertyNames) {
+ const prop = properties[propName]
+ const wasOptional = !originalRequired.includes(propName)
+
+ // Recursively transform nested objects/arrays/unions
+ if (prop.type === 'object' && prop.properties) {
+ properties[propName] = makeDeepSeekStructuredOutputCompatible(
+ prop,
+ prop.required || [],
+ )
+ } else if (prop.type === 'array' && prop.items) {
+ properties[propName] = {
+ ...prop,
+ items: makeDeepSeekStructuredOutputCompatible(
+ prop.items,
+ prop.items.required || [],
+ ),
+ }
+ } else if (prop.anyOf) {
+ // Handle anyOf at property level (union types)
+ properties[propName] = makeDeepSeekStructuredOutputCompatible(
+ prop,
+ prop.required || [],
+ )
+ } else if (prop.oneOf) {
+ // oneOf is not supported by DeepSeek - throw early
+ throw new Error(
+ 'oneOf is not supported in DeepSeek structured output schemas. Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types',
+ )
+ } else if (wasOptional) {
+ // Make optional fields nullable by adding null to the type
+ if (prop.type && !Array.isArray(prop.type)) {
+ properties[propName] = {
+ ...prop,
+ type: [prop.type, 'null'],
+ }
+ } else if (Array.isArray(prop.type) && !prop.type.includes('null')) {
+ properties[propName] = {
+ ...prop,
+ type: [...prop.type, 'null'],
+ }
+ }
+ }
+ }
+
+ result.properties = properties
+ // ALL properties must be required for DeepSeek structured output
+ result.required = allPropertyNames
+ // additionalProperties must be false
+ result.additionalProperties = false
+ }
+
+ // Handle array types with object items
+ if (result.type === 'array' && result.items) {
+ result.items = makeDeepSeekStructuredOutputCompatible(
+ result.items,
+ result.items.required || [],
+ )
+ }
+
+ // Handle anyOf (union types) - each variant needs to be transformed
+ if (result.anyOf && Array.isArray(result.anyOf)) {
+ result.anyOf = result.anyOf.map((variant) =>
+ makeDeepSeekStructuredOutputCompatible(variant, variant.required || []),
+ )
+ }
+
+ if (result.oneOf) {
+ throw new Error(
+ 'oneOf is not supported in DeepSeek structured output schemas. Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types',
+ )
+ }
+
+ return result
+}
diff --git a/packages/typescript/ai-deepseek/tests/deepseek-adapter.test.ts b/packages/typescript/ai-deepseek/tests/deepseek-adapter.test.ts
new file mode 100644
index 00000000..1c5757b3
--- /dev/null
+++ b/packages/typescript/ai-deepseek/tests/deepseek-adapter.test.ts
@@ -0,0 +1,292 @@
+import { describe, expect, it } from 'vitest'
+import { deepseekText, createDeepSeekText } from '../src/adapters/text'
+import {
+ deepseekSummarize,
+ createDeepSeekSummarize,
+} from '../src/adapters/summarize'
+import { DEEPSEEK_CHAT_MODELS } from '../src/model-meta'
+
+describe('DeepSeek Adapter', () => {
+ describe('Text Adapter Factory Functions', () => {
+ it('should create text adapter with explicit API key', () => {
+ const adapter = createDeepSeekText('deepseek-chat', 'test-api-key')
+
+ expect(adapter.name).toBe('deepseek')
+ expect(adapter.kind).toBe('text')
+ expect(adapter.model).toBe('deepseek-chat')
+ })
+
+ it('should throw error when creating text adapter without API key in env', () => {
+ // Temporarily remove any existing API key
+ const originalEnv = process.env.DEEPSEEK_API_KEY
+ delete process.env.DEEPSEEK_API_KEY
+
+ expect(() => {
+ deepseekText('deepseek-chat')
+ }).toThrow('DEEPSEEK_API_KEY is required')
+
+ // Restore original env
+ if (originalEnv) {
+ process.env.DEEPSEEK_API_KEY = originalEnv
+ }
+ })
+
+ it('should create text adapter with API key from environment', () => {
+ // Temporarily save any existing API key
+ const originalEnv = process.env.DEEPSEEK_API_KEY
+
+ // Set test API key in environment
+ process.env.DEEPSEEK_API_KEY = 'test-env-api-key'
+
+ const adapter = deepseekText('deepseek-chat')
+
+ expect(adapter.name).toBe('deepseek')
+ expect(adapter.kind).toBe('text')
+ expect(adapter.model).toBe('deepseek-chat')
+
+ // Restore original env
+ if (originalEnv) {
+ process.env.DEEPSEEK_API_KEY = originalEnv
+ } else {
+ delete process.env.DEEPSEEK_API_KEY
+ }
+ })
+ })
+
+ describe('Summarize Adapter Factory Functions', () => {
+ it('should create summarize adapter with explicit API key', () => {
+ const adapter = createDeepSeekSummarize('deepseek-chat', 'test-api-key')
+
+ expect(adapter.name).toBe('deepseek')
+ expect(adapter.kind).toBe('summarize')
+ expect(adapter.model).toBe('deepseek-chat')
+ })
+
+ it('should throw error when creating summarize adapter without API key in env', () => {
+ // Temporarily remove any existing API key
+ const originalEnv = process.env.DEEPSEEK_API_KEY
+ delete process.env.DEEPSEEK_API_KEY
+
+ expect(() => {
+ deepseekSummarize('deepseek-chat')
+ }).toThrow('DEEPSEEK_API_KEY is required')
+
+ // Restore original env
+ if (originalEnv) {
+ process.env.DEEPSEEK_API_KEY = originalEnv
+ }
+ })
+ })
+
+ describe('Model Constants', () => {
+ it('should export expected chat models', () => {
+ expect(DEEPSEEK_CHAT_MODELS).toEqual([
+ 'deepseek-chat',
+ 'deepseek-reasoner',
+ 'deepseek-coder',
+ ])
+ })
+
+ it('should accept all supported models in text adapter', () => {
+ for (const model of DEEPSEEK_CHAT_MODELS) {
+ expect(() => {
+ createDeepSeekText(model, 'test-api-key')
+ }).not.toThrow()
+ }
+ })
+
+ it('should accept all supported models in summarize adapter', () => {
+ for (const model of DEEPSEEK_CHAT_MODELS) {
+ expect(() => {
+ createDeepSeekSummarize(model, 'test-api-key')
+ }).not.toThrow()
+ }
+ })
+ })
+
+ describe('Type Safety', () => {
+ it('should have correct adapter types', () => {
+ const textAdapter = createDeepSeekText('deepseek-chat', 'test-key')
+ const summarizeAdapter = createDeepSeekSummarize(
+ 'deepseek-reasoner',
+ 'test-key',
+ )
+
+ // These assertions ensure TypeScript compilation and basic type checking
+ expect(typeof textAdapter.chatStream).toBe('function')
+ expect(typeof textAdapter.structuredOutput).toBe('function')
+ expect(typeof summarizeAdapter.summarize).toBe('function')
+ expect(typeof summarizeAdapter.summarizeStream).toBe('function')
+ })
+ })
+
+ describe('Thinking Mode', () => {
+ it('should automatically enable thinking mode for deepseek-reasoner model', () => {
+ const adapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+
+ expect(adapter.model).toBe('deepseek-reasoner')
+ // The thinking mode is enabled internally based on model name
+ })
+
+ it('should accept thinking option in model options', () => {
+ const adapter = createDeepSeekText('deepseek-chat', 'test-key')
+
+ // Test that thinking option can be passed in modelOptions
+ expect(() => {
+ // This would be used in actual chat call with modelOptions: { thinking: true }
+ adapter.model
+ }).not.toThrow()
+ })
+
+ it('should support reasoning via streaming chunks', () => {
+ const adapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+
+ // Test that the adapter can handle reasoning through streaming
+ // Reasoning is handled via 'thinking' stream chunks, not message metadata
+ expect(adapter.model).toBe('deepseek-reasoner')
+ expect(adapter.kind).toBe('text')
+ expect(adapter.name).toBe('deepseek')
+ })
+
+ it('should handle multi-turn conversations following API guidelines', () => {
+ const adapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+
+ // Test conversation history following DeepSeek API guidelines
+ // Per DeepSeek docs: reasoning content should NOT be sent back to API
+ const conversationHistory = [
+ {
+ role: 'user' as const,
+ content: 'What is 2+2?',
+ },
+ {
+ role: 'assistant' as const,
+ content: '4',
+ },
+ {
+ role: 'user' as const,
+ content: 'What about 3+3?',
+ },
+ ]
+
+ // Should handle standard conversation format
+ expect(conversationHistory).toBeDefined()
+ expect(adapter.model).toBe('deepseek-reasoner')
+ })
+
+ it('should handle thinking mode parameter correctly', () => {
+ const adapter = createDeepSeekText('deepseek-chat', 'test-key')
+
+ // Test that the adapter accepts thinking parameter in model options
+ const mockOptions = {
+ model: 'deepseek-chat' as const,
+ messages: [{ role: 'user' as const, content: 'test' }],
+ modelOptions: { thinking: true },
+ }
+
+ // Verify the adapter can process options with thinking enabled
+ expect(adapter.model).toBe('deepseek-chat')
+ expect(mockOptions.modelOptions.thinking).toBe(true)
+ })
+
+ it('should differentiate between reasoning and regular content', () => {
+ const adapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+
+ // Test message structure with reasoning metadata
+ const messageWithReasoning = {
+ role: 'assistant' as const,
+ content: 'The final answer is 42.',
+ metadata: {
+ reasoning_content:
+ 'Let me think step by step: First I need to understand the question...',
+ },
+ }
+
+ expect(messageWithReasoning.metadata.reasoning_content).toContain(
+ 'step by step',
+ )
+ expect(messageWithReasoning.content).not.toContain('step by step')
+ })
+
+ it('should support both automatic and manual thinking mode activation', () => {
+ // Test automatic activation with deepseek-reasoner
+ const autoAdapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+ expect(autoAdapter.model).toBe('deepseek-reasoner')
+
+ // Test manual activation with deepseek-chat
+ const manualAdapter = createDeepSeekText('deepseek-chat', 'test-key')
+ expect(manualAdapter.model).toBe('deepseek-chat')
+
+ // Both should be valid adapters
+ expect(autoAdapter.kind).toBe('text')
+ expect(manualAdapter.kind).toBe('text')
+ })
+
+ it('should handle tool calling with thinking mode', () => {
+ const adapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+
+ // Test tool call structure following standards
+ const toolCallMessage = {
+ role: 'assistant' as const,
+ content: '',
+ toolCalls: [
+ {
+ id: 'call_123',
+ type: 'function' as const,
+ function: {
+ name: 'get_weather',
+ arguments: '{"location": "San Francisco", "date": "2025-01-01"}',
+ },
+ },
+ ],
+ }
+
+ expect(toolCallMessage.toolCalls).toHaveLength(1)
+ expect(toolCallMessage.toolCalls[0].function.name).toBe('get_weather')
+ })
+
+ it('should handle standard conversation format per DeepSeek API', () => {
+ const adapter = createDeepSeekText('deepseek-reasoner', 'test-key')
+
+ // Test conversation flow following DeepSeek API standards
+ const conversation = [
+ {
+ role: 'user' as const,
+ content: 'What is 15 * 23?',
+ },
+ {
+ role: 'assistant' as const,
+ content: '345',
+ },
+ {
+ role: 'user' as const,
+ content: 'How did you calculate that?',
+ },
+ ]
+
+ const assistantMessage = conversation[1]
+ expect(assistantMessage.content).toBe('345')
+ expect(assistantMessage.role).toBe('assistant')
+
+ // Standard message format without metadata
+ const nextUserMessage = conversation[2]
+ expect(nextUserMessage.role).toBe('user')
+ expect(nextUserMessage.content).toBe('How did you calculate that?')
+ })
+ })
+
+ describe('Configuration', () => {
+ it('should accept custom base URL in config', () => {
+ expect(() => {
+ createDeepSeekText('deepseek-chat', 'test-key', {
+ baseURL: 'https://custom.deepseek.api.com',
+ })
+ }).not.toThrow()
+ })
+
+ it('should accept empty config object', () => {
+ expect(() => {
+ createDeepSeekText('deepseek-chat', 'test-key', {})
+ }).not.toThrow()
+ })
+ })
+})
diff --git a/packages/typescript/ai-deepseek/tsconfig.json b/packages/typescript/ai-deepseek/tsconfig.json
new file mode 100644
index 00000000..ea11c109
--- /dev/null
+++ b/packages/typescript/ai-deepseek/tsconfig.json
@@ -0,0 +1,9 @@
+{
+ "extends": "../../../tsconfig.json",
+ "compilerOptions": {
+ "outDir": "dist",
+ "rootDir": "src"
+ },
+ "include": ["src/**/*.ts", "src/**/*.tsx"],
+ "exclude": ["node_modules", "dist", "**/*.config.ts"]
+}
diff --git a/packages/typescript/ai-deepseek/vite.config.ts b/packages/typescript/ai-deepseek/vite.config.ts
new file mode 100644
index 00000000..77bcc2e6
--- /dev/null
+++ b/packages/typescript/ai-deepseek/vite.config.ts
@@ -0,0 +1,36 @@
+import { defineConfig, mergeConfig } from 'vitest/config'
+import { tanstackViteConfig } from '@tanstack/vite-config'
+import packageJson from './package.json'
+
+const config = defineConfig({
+ test: {
+ name: packageJson.name,
+ dir: './',
+ watch: false,
+ globals: true,
+ environment: 'node',
+ include: ['tests/**/*.test.ts'],
+ coverage: {
+ provider: 'v8',
+ reporter: ['text', 'json', 'html', 'lcov'],
+ exclude: [
+ 'node_modules/',
+ 'dist/',
+ 'tests/',
+ '**/*.test.ts',
+ '**/*.config.ts',
+ '**/types.ts',
+ ],
+ include: ['src/**/*.ts'],
+ },
+ },
+})
+
+export default mergeConfig(
+ config,
+ tanstackViteConfig({
+ entry: ['./src/index.ts'],
+ srcDir: './src',
+ cjs: false,
+ }),
+)